Upgrade yapf to 0.20.0

Upgrade the pinned yapf version from 0.16.0 to 0.20.0 in tools/distrib/yapf_code.sh and reformat the Python files under tools/ with the new version.
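
A minimal sketch of how this reformat can be reproduced, assuming tools/distrib/yapf_code.sh is the canonical formatter entry point (the diff below updates its yapf pin):

    # hypothetical reproduction steps; yapf_code.sh builds a virtualenv,
    # installs the pinned yapf, and rewrites the covered Python files in place
    sed -i 's/yapf==0.16.0/yapf==0.20.0/' tools/distrib/yapf_code.sh
    tools/distrib/yapf_code.sh
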
diff --git a/tools/buildgen/bunch.py b/tools/buildgen/bunch.py
index 0ce9a6b..f3bfc81 100755
--- a/tools/buildgen/bunch.py
+++ b/tools/buildgen/bunch.py
@@ -48,5 +48,6 @@
     elif isinstance(dst, list) and isinstance(add, list):
         dst.extend(add)
     else:
-        raise Exception('Tried to merge incompatible objects %s %s\n\n%r\n\n%r'
-                        % (type(dst).__name__, type(add).__name__, dst, add))
+        raise Exception(
+            'Tried to merge incompatible objects %s %s\n\n%r\n\n%r' %
+            (type(dst).__name__, type(add).__name__, dst, add))
diff --git a/tools/buildgen/mako_renderer.py b/tools/buildgen/mako_renderer.py
index 0569fa1..acd72bd 100755
--- a/tools/buildgen/mako_renderer.py
+++ b/tools/buildgen/mako_renderer.py
@@ -99,10 +99,10 @@
         elif opt == '-P':
             assert not got_preprocessed_input
             assert json_dict == {}
-            sys.path.insert(
-                0,
-                os.path.abspath(
-                    os.path.join(os.path.dirname(sys.argv[0]), 'plugins')))
+            sys.path.insert(0,
+                            os.path.abspath(
+                                os.path.join(
+                                    os.path.dirname(sys.argv[0]), 'plugins')))
             with open(arg, 'r') as dict_file:
                 dictionary = pickle.load(dict_file)
             got_preprocessed_input = True
diff --git a/tools/buildgen/plugins/expand_filegroups.py b/tools/buildgen/plugins/expand_filegroups.py
index 886a59c..99d9463 100755
--- a/tools/buildgen/plugins/expand_filegroups.py
+++ b/tools/buildgen/plugins/expand_filegroups.py
@@ -104,8 +104,7 @@
 
     # build reverse dependency map
     things = {}
-    for thing in dictionary['libs'] + dictionary['targets'] + dictionary[
-            'filegroups']:
+    for thing in dictionary['libs'] + dictionary['targets'] + dictionary['filegroups']:
         things[thing['name']] = thing
         thing['used_by'] = []
     thing_deps = lambda t: t.get('uses', []) + t.get('filegroups', []) + t.get('deps', [])
@@ -148,7 +147,7 @@
                 lib[lst] = vals
             lib['plugins'] = plugins
         if lib.get('generate_plugin_registry', False):
-            lib['src'].append('src/core/plugin_registry/%s_plugin_registry.cc' %
-                              lib['name'])
+            lib['src'].append(
+                'src/core/plugin_registry/%s_plugin_registry.cc' % lib['name'])
         for lst in FILEGROUP_LISTS:
             lib[lst] = uniquify(lib.get(lst, []))
diff --git a/tools/buildgen/plugins/generate_vsprojects.py b/tools/buildgen/plugins/generate_vsprojects.py
index f6251d4..f7ef492 100755
--- a/tools/buildgen/plugins/generate_vsprojects.py
+++ b/tools/buildgen/plugins/generate_vsprojects.py
@@ -56,11 +56,12 @@
             target['vs_props'] = []
         target['vs_proj_dir'] = target.get('vs_proj_dir', default_test_dir)
         if target.get('vs_project_guid',
-                      None) is None and 'windows' in target.get('platforms',
-                                                                ['windows']):
+                      None) is None and 'windows' in target.get(
+                          'platforms', ['windows']):
             name = target['name']
             guid = re.sub('(........)(....)(....)(....)(.*)',
-                          r'{\1-\2-\3-\4-\5}', hashlib.md5(name).hexdigest())
+                          r'{\1-\2-\3-\4-\5}',
+                          hashlib.md5(name).hexdigest())
             target['vs_project_guid'] = guid.upper()
     # Exclude projects without a visual project guid, such as the tests.
     projects = [
@@ -69,9 +70,9 @@
 
     projects = [
         project for project in projects
-        if project['language'] != 'c++' or project['build'] == 'all' or project[
-            'build'] == 'protoc' or (project['language'] == 'c++' and (project[
-                'build'] == 'test' or project['build'] == 'private'))
+        if project['language'] != 'c++' or project['build'] == 'all' or
+        project['build'] == 'protoc' or (project['language'] == 'c++' and (
+            project['build'] == 'test' or project['build'] == 'private'))
     ]
 
     project_dict = dict([(p['name'], p) for p in projects])
diff --git a/tools/buildgen/plugins/transitive_dependencies.py b/tools/buildgen/plugins/transitive_dependencies.py
index 5373bca..258e10b 100644
--- a/tools/buildgen/plugins/transitive_dependencies.py
+++ b/tools/buildgen/plugins/transitive_dependencies.py
@@ -54,5 +54,5 @@
                 target['transitive_deps'] = transitive_deps(target, libs)
 
     python_dependencies = dictionary.get('python_dependencies')
-    python_dependencies['transitive_deps'] = (
-        transitive_deps(python_dependencies, libs))
+    python_dependencies['transitive_deps'] = (transitive_deps(
+        python_dependencies, libs))
diff --git a/tools/codegen/core/gen_settings_ids.py b/tools/codegen/core/gen_settings_ids.py
index bc43806..5c3f066 100755
--- a/tools/codegen/core/gen_settings_ids.py
+++ b/tools/codegen/core/gen_settings_ids.py
@@ -174,10 +174,13 @@
         print >> C, "{NULL, 0, 0, 0, GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE, GRPC_HTTP2_PROTOCOL_ERROR},"
         i += 1
     print >> C, "{\"%s\", %du, %du, %du, GRPC_CHTTP2_%s, GRPC_HTTP2_%s}," % (
-        decorated_setting.name, decorated_setting.setting.default,
-        decorated_setting.setting.min, decorated_setting.setting.max,
+        decorated_setting.name,
+        decorated_setting.setting.default,
+        decorated_setting.setting.min,
+        decorated_setting.setting.max,
         decorated_setting.setting.on_error.behavior,
-        decorated_setting.setting.on_error.code,)
+        decorated_setting.setting.on_error.code,
+    )
     i += 1
 print >> C, "};"
 
diff --git a/tools/codegen/core/gen_static_metadata.py b/tools/codegen/core/gen_static_metadata.py
index 9d99bbf..3a171bb 100755
--- a/tools/codegen/core/gen_static_metadata.py
+++ b/tools/codegen/core/gen_static_metadata.py
@@ -387,8 +387,8 @@
     print >> H, '#define %s (grpc_static_slice_table[%d])' % (
         mangle(elem).upper(), i)
 print >> H
-print >> C, 'static uint8_t g_bytes[] = {%s};' % (
-    ','.join('%d' % ord(c) for c in ''.join(all_strs)))
+print >> C, 'static uint8_t g_bytes[] = {%s};' % (','.join(
+    '%d' % ord(c) for c in ''.join(all_strs)))
 print >> C
 print >> C, 'static void static_ref(void *unused) {}'
 print >> C, 'static void static_unref(void *unused) {}'
@@ -444,8 +444,8 @@
 print >> H
 print >> C, ('uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] '
              '= {')
-print >> C, '  %s' % ','.join('%d' % static_userdata.get(elem, 0)
-                              for elem in all_elems)
+print >> C, '  %s' % ','.join(
+    '%d' % static_userdata.get(elem, 0) for elem in all_elems)
 print >> C, '};'
 print >> C
 
@@ -520,8 +520,8 @@
     idxs[h] = i
 print >> C, 'static const uint16_t elem_keys[] = {%s};' % ','.join(
     '%d' % k for k in keys)
-print >> C, 'static const uint8_t elem_idxs[] = {%s};' % ','.join('%d' % i
-                                                                  for i in idxs)
+print >> C, 'static const uint8_t elem_idxs[] = {%s};' % ','.join(
+    '%d' % i for i in idxs)
 print >> C
 
 print >> H, 'grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b);'
@@ -579,8 +579,8 @@
     1 << len(STREAM_COMPRESSION_ALGORITHMS))
 print >> C, 'const uint8_t grpc_static_accept_stream_encoding_metadata[%d] = {' % (
     1 << len(STREAM_COMPRESSION_ALGORITHMS))
-print >> C, '0,%s' % ','.join('%d' % md_idx(elem)
-                              for elem in stream_compression_elems)
+print >> C, '0,%s' % ','.join(
+    '%d' % md_idx(elem) for elem in stream_compression_elems)
 print >> C, '};'
 
 print >> H, '#define GRPC_MDELEM_ACCEPT_STREAM_ENCODING_FOR_ALGORITHMS(algs) (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[grpc_static_accept_stream_encoding_metadata[(algs)]], GRPC_MDELEM_STORAGE_STATIC))'
diff --git a/tools/codegen/core/gen_stats_data.py b/tools/codegen/core/gen_stats_data.py
index 93f2035..5c9d9e5 100755
--- a/tools/codegen/core/gen_stats_data.py
+++ b/tools/codegen/core/gen_stats_data.py
@@ -28,8 +28,8 @@
 
 
 def make_type(name, fields):
-    return (collections.namedtuple(
-        name, ' '.join(list(set(REQUIRED_FIELDS + fields)))), [])
+    return (collections.namedtuple(name, ' '.join(
+        list(set(REQUIRED_FIELDS + fields)))), [])
 
 
 def c_str(s, encoding='ascii'):
@@ -44,7 +44,10 @@
     return '"' + result + '"'
 
 
-types = (make_type('Counter', []), make_type('Histogram', ['max', 'buckets']),)
+types = (
+    make_type('Counter', []),
+    make_type('Histogram', ['max', 'buckets']),
+)
 
 inst_map = dict((t[0].__name__, t[1]) for t in types)
 
@@ -349,8 +352,8 @@
     print >> C, "const int grpc_stats_histo_start[%d] = {%s};" % (
         len(inst_map['Histogram']), ','.join('%s' % x for x in histo_start))
     print >> C, "const int *const grpc_stats_histo_bucket_boundaries[%d] = {%s};" % (
-        len(inst_map['Histogram']), ','.join('grpc_stats_table_%d' % x
-                                             for x in histo_bucket_boundaries))
+        len(inst_map['Histogram']), ','.join(
+            'grpc_stats_table_%d' % x for x in histo_bucket_boundaries))
     print >> C, "void (*const grpc_stats_inc_histogram[%d])(int x) = {%s};" % (
         len(inst_map['Histogram']), ','.join(
             'grpc_stats_inc_%s' % histogram.name.lower()
diff --git a/tools/debug/core/error_ref_leak.py b/tools/debug/core/error_ref_leak.py
index 7806338..7e206c2 100644
--- a/tools/debug/core/error_ref_leak.py
+++ b/tools/debug/core/error_ref_leak.py
@@ -39,7 +39,7 @@
         elif line[0] == "realloc":
             errs.remove(line[1])
             errs.append(line[3])
-        # explicitly look for the last dereference 
+        # explicitly look for the last dereference
         elif line[1] == "1" and line[3] == "0":
             assert (err in errs)
             errs.remove(err)
diff --git a/tools/distrib/check_copyright.py b/tools/distrib/check_copyright.py
index f6e7362..8f782e0 100755
--- a/tools/distrib/check_copyright.py
+++ b/tools/distrib/check_copyright.py
@@ -84,13 +84,15 @@
     # census.proto copied from github
     'tools/grpcz/census.proto',
     # status.proto copied from googleapis
-    'src/proto/grpc/status/status.proto',))
+    'src/proto/grpc/status/status.proto',
+))
 
 RE_YEAR = r'Copyright (?P<first_year>[0-9]+\-)?(?P<last_year>[0-9]+) gRPC authors.'
-RE_LICENSE = dict((k, r'\n'.join(
-    LICENSE_PREFIX[k] + (RE_YEAR
-                         if re.search(RE_YEAR, line) else re.escape(line))
-    for line in LICENSE_NOTICE)) for k, v in LICENSE_PREFIX.iteritems())
+RE_LICENSE = dict(
+    (k, r'\n'.join(LICENSE_PREFIX[k] +
+                   (RE_YEAR if re.search(RE_YEAR, line) else re.escape(line))
+                   for line in LICENSE_NOTICE))
+    for k, v in LICENSE_PREFIX.iteritems())
 
 if args.precommit:
     FILE_LIST_COMMAND = 'git status -z | grep -Poz \'(?<=^[MARC][MARCD ] )[^\s]+\''
diff --git a/tools/distrib/check_include_guards.py b/tools/distrib/check_include_guards.py
index 24e076a..6fc606f 100755
--- a/tools/distrib/check_include_guards.py
+++ b/tools/distrib/check_include_guards.py
@@ -95,14 +95,14 @@
         # Does the guard end with a '_H'?
         running_guard = match.group(1)
         if not running_guard.endswith('_H'):
-            fcontents = self.fail(fpath, match.re, match.string,
-                                  match.group(1), valid_guard, fix)
+            fcontents = self.fail(fpath, match.re, match.string, match.group(1),
+                                  valid_guard, fix)
             if fix: save(fpath, fcontents)
 
         # Is it the expected one based on the file path?
         if running_guard != valid_guard:
-            fcontents = self.fail(fpath, match.re, match.string,
-                                  match.group(1), valid_guard, fix)
+            fcontents = self.fail(fpath, match.re, match.string, match.group(1),
+                                  valid_guard, fix)
             if fix: save(fpath, fcontents)
 
         # Is there a #define? Is it the same as the #ifndef one?
@@ -114,8 +114,8 @@
 
         # Is the #define guard the same as the #ifndef guard?
         if match.group(1) != running_guard:
-            fcontents = self.fail(fpath, match.re, match.string,
-                                  match.group(1), valid_guard, fix)
+            fcontents = self.fail(fpath, match.re, match.string, match.group(1),
+                                  valid_guard, fix)
             if fix: save(fpath, fcontents)
 
         # Is there a properly commented #endif?
@@ -138,8 +138,8 @@
                 self.fail(fpath, endif_re, flines[-1], '', '', False)
         elif match.group(1) != running_guard:
             # Is the #endif guard the same as the #ifndef and #define guards?
-            fcontents = self.fail(fpath, endif_re, fcontents,
-                                  match.group(1), valid_guard, fix)
+            fcontents = self.fail(fpath, endif_re, fcontents, match.group(1),
+                                  valid_guard, fix)
             if fix: save(fpath, fcontents)
 
         return not self.failed  # Did the check succeed? (ie, not failed)
diff --git a/tools/distrib/python/grpcio_tools/grpc_tools/command.py b/tools/distrib/python/grpcio_tools/grpc_tools/command.py
index c0f9d93..7ede05f 100644
--- a/tools/distrib/python/grpcio_tools/grpc_tools/command.py
+++ b/tools/distrib/python/grpcio_tools/grpc_tools/command.py
@@ -30,8 +30,8 @@
                 proto_files.append(
                     os.path.abspath(os.path.join(root, filename)))
 
-    well_known_protos_include = pkg_resources.resource_filename('grpc_tools',
-                                                                '_proto')
+    well_known_protos_include = pkg_resources.resource_filename(
+        'grpc_tools', '_proto')
 
     for proto_file in proto_files:
         command = [
diff --git a/tools/distrib/python/grpcio_tools/setup.py b/tools/distrib/python/grpcio_tools/setup.py
index 342a220..c6bcee4 100644
--- a/tools/distrib/python/grpcio_tools/setup.py
+++ b/tools/distrib/python/grpcio_tools/setup.py
@@ -196,7 +196,8 @@
         language='c++',
         define_macros=list(DEFINE_MACROS),
         extra_compile_args=list(EXTRA_COMPILE_ARGS),
-        extra_link_args=list(EXTRA_LINK_ARGS),)
+        extra_link_args=list(EXTRA_LINK_ARGS),
+    )
     extensions = [plugin_ext]
     if BUILD_WITH_CYTHON:
         from Cython import Build
@@ -220,4 +221,5 @@
         'protobuf>=3.5.0.post1',
         'grpcio>={version}'.format(version=grpc_version.VERSION),
     ],
-    package_data=package_data(),)
+    package_data=package_data(),
+)
diff --git a/tools/distrib/python/make_grpcio_tools.py b/tools/distrib/python/make_grpcio_tools.py
index 216492a..4847233 100755
--- a/tools/distrib/python/make_grpcio_tools.py
+++ b/tools/distrib/python/make_grpcio_tools.py
@@ -107,12 +107,14 @@
      `out_file`."""
     cc_files_output = bazel_query(BAZEL_DEPS_PROTOC_LIB_QUERY)
     cc_files = [
-        name[len(PROTOBUF_CC_PREFIX):] for name in cc_files_output
+        name[len(PROTOBUF_CC_PREFIX):]
+        for name in cc_files_output
         if name.endswith('.cc') and name.startswith(PROTOBUF_CC_PREFIX)
     ]
     proto_files_output = bazel_query(BAZEL_DEPS_COMMON_PROTOS_QUERY)
     proto_files = [
-        name[len(PROTOBUF_PROTO_PREFIX):] for name in proto_files_output
+        name[len(PROTOBUF_PROTO_PREFIX):]
+        for name in proto_files_output
         if name.endswith('.proto') and name.startswith(PROTOBUF_PROTO_PREFIX)
     ]
     commit_hash = protobuf_submodule_commit_hash()
diff --git a/tools/distrib/run_clang_tidy.py b/tools/distrib/run_clang_tidy.py
index 3ac712e..72d7956 100755
--- a/tools/distrib/run_clang_tidy.py
+++ b/tools/distrib/run_clang_tidy.py
@@ -54,7 +54,8 @@
 args = argp.parse_args()
 
 cmdline = [
-    clang_tidy, '--checks=-*,%s' % ','.join(GRPC_CHECKS),
+    clang_tidy,
+    '--checks=-*,%s' % ','.join(GRPC_CHECKS),
     '--warnings-as-errors=%s' % ','.join(GRPC_CHECKS)
 ] + ['--extra-arg-before=%s' % arg for arg in extra_args]
 
@@ -65,6 +66,7 @@
 for filename in args.files:
     jobs.append(jobset.JobSpec(
         cmdline + [filename],
-        shortname=filename,))  #verbose_success=True))
+        shortname=filename,
+    ))  #verbose_success=True))
 
 jobset.run(jobs, maxjobs=args.jobs)
diff --git a/tools/distrib/yapf_code.sh b/tools/distrib/yapf_code.sh
index fb14f36..d188a02 100755
--- a/tools/distrib/yapf_code.sh
+++ b/tools/distrib/yapf_code.sh
@@ -34,7 +34,7 @@
 PYTHON=$(realpath "${VIRTUALENV}/bin/python")
 $PYTHON -m pip install --upgrade pip==9.0.1
 $PYTHON -m pip install --upgrade futures
-$PYTHON -m pip install yapf==0.16.0
+$PYTHON -m pip install yapf==0.20.0
 
 yapf() {
     local exclusion exclusion_args=()
diff --git a/tools/failures/detect_new_failures.py b/tools/failures/detect_new_failures.py
index 87fd1d9..4063978 100644
--- a/tools/failures/detect_new_failures.py
+++ b/tools/failures/detect_new_failures.py
@@ -49,8 +49,8 @@
     if 200 <= response.getcode() < 300:
         return json.loads(response.read())
     else:
-        raise ValueError('Error ({}) accessing {}'.format(response.getcode(),
-                                                          response.geturl()))
+        raise ValueError('Error ({}) accessing {}'.format(
+            response.getcode(), response.geturl()))
 
 
 def search_gh_issues(search_term, status='open'):
@@ -92,11 +92,11 @@
         else:
             preexisting_issues = search_gh_issues(test_name)
             if preexisting_issues['total_count'] > 0:
-                print('\nFound {} issues for "{}":'.format(preexisting_issues[
-                    'total_count'], test_name))
+                print('\nFound {} issues for "{}":'.format(
+                    preexisting_issues['total_count'], test_name))
                 for issue in preexisting_issues['items']:
-                    print('\t"{}" ; URL: {}'.format(issue['title'], issue[
-                        'html_url']))
+                    print('\t"{}" ; URL: {}'.format(issue['title'],
+                                                    issue['html_url']))
             else:
                 print(
                     '\nNo preexisting issues found for "{}"'.format(test_name))
@@ -148,10 +148,11 @@
     rows = page.get('rows')
     if rows:
         return {
-            row['f'][0]['v']: Row(poll_strategy=row['f'][1]['v'],
-                                  job_name=row['f'][2]['v'],
-                                  build_id=row['f'][3]['v'],
-                                  timestamp=row['f'][4]['v'])
+            row['f'][0]['v']: Row(
+                poll_strategy=row['f'][1]['v'],
+                job_name=row['f'][2]['v'],
+                build_id=row['f'][3]['v'],
+                timestamp=row['f'][4]['v'])
             for row in rows
         }
     else:
@@ -294,13 +295,14 @@
     elif args.format == 'csv':
         if args.count_only:
             print('from_date,to_date,count')
-            print('{},{},{}'.format(dates['reporting']['begin'].isoformat(
-            ), dates['reporting']['end'].isoformat(), len(new_flakes)))
+            print('{},{},{}'.format(dates['reporting']['begin'].isoformat(),
+                                    dates['reporting']['end'].isoformat(),
+                                    len(new_flakes)))
         else:
             print_table(new_flakes, 'csv')
     else:
-        raise ValueError(
-            'Invalid argument for --format: {}'.format(args.format))
+        raise ValueError('Invalid argument for --format: {}'.format(
+            args.format))
 
 
 if __name__ == '__main__':
diff --git a/tools/github/pr_latency.py b/tools/github/pr_latency.py
index 0131e60..34870a5 100644
--- a/tools/github/pr_latency.py
+++ b/tools/github/pr_latency.py
@@ -72,9 +72,13 @@
                 errors))
     elif mode == 'csv':
         print(','.join([
-            str(pr), str(base_time), str(test_time), str(
-                int((test_time - base_time).total_seconds())), str(successes),
-            str(failures), str(errors)
+            str(pr),
+            str(base_time),
+            str(test_time),
+            str(int((test_time - base_time).total_seconds())),
+            str(successes),
+            str(failures),
+            str(errors)
         ]))
 
 
@@ -117,8 +121,7 @@
     if system == 'kokoro': string_in_target_url = 'kokoro'
     elif system == 'jenkins': string_in_target_url = 'grpc-testing'
     for status in statuses['statuses']:
-        if not status['target_url'] or string_in_target_url not in status[
-                'target_url']:
+        if not status['target_url'] or string_in_target_url not in status['target_url']:
             continue  # Ignore jenkins
         if status['state'] == 'pending': return None
         elif status['state'] == 'success': successes += 1
@@ -130,8 +133,8 @@
             latest_datetime = max(latest_datetime,
                                   parse_timestamp(status['updated_at']))
     # First status is the most recent one.
-    if any([successes, failures, errors]) and sum(
-        [successes, failures, errors]) > 15:
+    if any([successes, failures, errors
+           ]) and sum([successes, failures, errors]) > 15:
         return {
             'latest_datetime': latest_datetime,
             'successes': successes,
diff --git a/tools/interop_matrix/create_matrix_images.py b/tools/interop_matrix/create_matrix_images.py
index ad7bb59..ef9f6a5 100755
--- a/tools/interop_matrix/create_matrix_images.py
+++ b/tools/interop_matrix/create_matrix_images.py
@@ -173,8 +173,8 @@
             releases = client_matrix.get_release_tags(lang)
         else:
             # Build a particular release.
-            if args.release not in ['master'] + client_matrix.get_release_tags(
-                    lang):
+            if args.release not in ['master'
+                                   ] + client_matrix.get_release_tags(lang):
                 jobset.message(
                     'SKIPPED',
                     '%s for %s is not defined' % (args.release, lang),
diff --git a/tools/interop_matrix/run_interop_matrix_tests.py b/tools/interop_matrix/run_interop_matrix_tests.py
index ff42bd7..3391ef5 100755
--- a/tools/interop_matrix/run_interop_matrix_tests.py
+++ b/tools/interop_matrix/run_interop_matrix_tests.py
@@ -105,8 +105,8 @@
         releases = ['master'] + client_matrix.get_release_tags(lang)
     else:
         # Look for a particular release.
-        if args.release not in ['master'] + client_matrix.get_release_tags(
-                lang):
+        if args.release not in ['master'
+                               ] + client_matrix.get_release_tags(lang):
             jobset.message(
                 'SKIPPED',
                 '%s for %s is not defined' % (args.release, lang),
@@ -136,7 +136,8 @@
             'Skipped images (no-tag/unknown-tag): %d' % skipped,
             do_newline=True)
         # Filter tags based on the releases.
-        images[runtime] = [(tag, '%s:%s' % (image_path, tag)) for tag in tags
+        images[runtime] = [(tag, '%s:%s' % (image_path, tag))
+                           for tag in tags
                            if tag in releases]
     return images
 
diff --git a/tools/line_count/collect-history.py b/tools/line_count/collect-history.py
index d2d5c95..c8e33c9 100755
--- a/tools/line_count/collect-history.py
+++ b/tools/line_count/collect-history.py
@@ -31,7 +31,8 @@
 for dt in daterange(start_date, end_date):
     dmy = dt.strftime('%Y-%m-%d')
     sha1 = subprocess.check_output(
-        ['git', 'rev-list', '-n', '1', '--before=%s' % dmy, 'master']).strip()
+        ['git', 'rev-list', '-n', '1',
+         '--before=%s' % dmy, 'master']).strip()
     subprocess.check_call(['git', 'checkout', sha1])
     subprocess.check_call(['git', 'submodule', 'update'])
     subprocess.check_call(['git', 'clean', '-f', '-x', '-d'])
diff --git a/tools/line_count/summarize-history.py b/tools/line_count/summarize-history.py
index 80b0ed7..4a08599 100755
--- a/tools/line_count/summarize-history.py
+++ b/tools/line_count/summarize-history.py
@@ -32,6 +32,7 @@
     dmy = dt.strftime('%Y-%m-%d')
     print dmy
     subprocess.check_call([
-        'tools/line_count/yaml2csv.py', '-i', '../count/%s.yaml' % dmy, '-d',
-        dmy, '-o', '../count/%s.csv' % dmy
+        'tools/line_count/yaml2csv.py', '-i',
+        '../count/%s.yaml' % dmy, '-d', dmy, '-o',
+        '../count/%s.csv' % dmy
     ])
diff --git a/tools/mkowners/mkowners.py b/tools/mkowners/mkowners.py
index d8b3d3c..2ea7265 100755
--- a/tools/mkowners/mkowners.py
+++ b/tools/mkowners/mkowners.py
@@ -47,7 +47,8 @@
 #
 
 owners_files = [
-    os.path.join(root, 'OWNERS') for root, dirs, files in os.walk(git_root)
+    os.path.join(root, 'OWNERS')
+    for root, dirs, files in os.walk(git_root)
     if 'OWNERS' in files
 ]
 
diff --git a/tools/profiling/microbenchmarks/bm2bq.py b/tools/profiling/microbenchmarks/bm2bq.py
index 746b643..e084e28 100755
--- a/tools/profiling/microbenchmarks/bm2bq.py
+++ b/tools/profiling/microbenchmarks/bm2bq.py
@@ -28,9 +28,9 @@
 columns = []
 
 for row in json.loads(
-        subprocess.check_output([
-            'bq', '--format=json', 'show', 'microbenchmarks.microbenchmarks'
-        ]))['schema']['fields']:
+        subprocess.check_output(
+            ['bq', '--format=json', 'show',
+             'microbenchmarks.microbenchmarks']))['schema']['fields']:
     columns.append((row['name'], row['type'].lower()))
 
 SANITIZE = {
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_build.py b/tools/profiling/microbenchmarks/bm_diff/bm_build.py
index a4cd617..4197ba3 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_build.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_build.py
@@ -43,7 +43,8 @@
         '-n',
         '--name',
         type=str,
-        help='Unique name of this build. To be used as a handle to pass to the other bm* scripts'
+        help=
+        'Unique name of this build. To be used as a handle to pass to the other bm* scripts'
     )
     argp.add_argument('--counters', dest='counters', action='store_true')
     argp.add_argument('--no-counters', dest='counters', action='store_false')
@@ -71,7 +72,8 @@
             subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
     os.rename(
         'bins',
-        'bm_diff_%s' % name,)
+        'bm_diff_%s' % name,
+    )
 
 
 if __name__ == '__main__':
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_constants.py b/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
index cff29db..5719e42 100644
--- a/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
@@ -22,10 +22,12 @@
     'bm_metadata', 'bm_fullstack_trickle'
 ]
 
-_INTERESTING = (
-    'cpu_time', 'real_time', 'call_initial_size-median', 'locks_per_iteration',
-    'allocs_per_iteration', 'writes_per_iteration', 'atm_cas_per_iteration',
-    'atm_add_per_iteration', 'nows_per_iteration',
-    'cli_transport_stalls_per_iteration', 'cli_stream_stalls_per_iteration',
-    'svr_transport_stalls_per_iteration', 'svr_stream_stalls_per_iteration',
-    'http2_pings_sent_per_iteration')
+_INTERESTING = ('cpu_time', 'real_time', 'call_initial_size-median',
+                'locks_per_iteration', 'allocs_per_iteration',
+                'writes_per_iteration', 'atm_cas_per_iteration',
+                'atm_add_per_iteration', 'nows_per_iteration',
+                'cli_transport_stalls_per_iteration',
+                'cli_stream_stalls_per_iteration',
+                'svr_transport_stalls_per_iteration',
+                'svr_stream_stalls_per_iteration',
+                'http2_pings_sent_per_iteration')
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
index b8a3b22..f975a8b 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
@@ -65,7 +65,8 @@
         '--loops',
         type=int,
         default=20,
-        help='Number of times to loops the benchmarks. Must match what was passed to bm_run.py'
+        help=
+        'Number of times to loops the benchmarks. Must match what was passed to bm_run.py'
     )
     argp.add_argument(
         '-r',
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_main.py b/tools/profiling/microbenchmarks/bm_diff/bm_main.py
index 137c22b..96c63ba 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_main.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_main.py
@@ -78,7 +78,8 @@
         '--loops',
         type=int,
         default=10,
-        help='Number of times to loops the benchmarks. More loops cuts down on noise'
+        help=
+        'Number of times to loops the benchmarks. More loops cuts down on noise'
     )
     argp.add_argument(
         '-j',
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_run.py b/tools/profiling/microbenchmarks/bm_diff/bm_run.py
index 08894bb..dfb9b17 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_run.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_run.py
@@ -51,7 +51,8 @@
         '-n',
         '--name',
         type=str,
-        help='Unique name of the build to run. Needs to match the handle passed to bm_build.py'
+        help=
+        'Unique name of the build to run. Needs to match the handle passed to bm_build.py'
     )
     argp.add_argument(
         '-r',
@@ -64,7 +65,8 @@
         '--loops',
         type=int,
         default=20,
-        help='Number of times to loops the benchmarks. More loops cuts down on noise'
+        help=
+        'Number of times to loops the benchmarks. More loops cuts down on noise'
     )
     argp.add_argument('--counters', dest='counters', action='store_true')
     argp.add_argument('--no-counters', dest='counters', action='store_false')
@@ -82,13 +84,14 @@
             'bm_diff_%s/%s/%s' % (name, cfg, bm), '--benchmark_list_tests',
             '--benchmark_filter=%s' % regex
     ]).splitlines():
-        stripped_line = line.strip().replace("/", "_").replace(
-            "<", "_").replace(">", "_").replace(", ", "_")
+        stripped_line = line.strip().replace("/",
+                                             "_").replace("<", "_").replace(
+                                                 ">", "_").replace(", ", "_")
         cmd = [
             'bm_diff_%s/%s/%s' % (name, cfg, bm),
             '--benchmark_filter=^%s$' % line,
-            '--benchmark_out=%s.%s.%s.%s.%d.json' %
-            (bm, stripped_line, cfg, name, idx),
+            '--benchmark_out=%s.%s.%s.%s.%d.json' % (bm, stripped_line, cfg,
+                                                     name, idx),
             '--benchmark_out_format=json',
         ]
         jobs_list.append(
diff --git a/tools/profiling/microbenchmarks/bm_json.py b/tools/profiling/microbenchmarks/bm_json.py
index 1dd9f65..497d7ca 100644
--- a/tools/profiling/microbenchmarks/bm_json.py
+++ b/tools/profiling/microbenchmarks/bm_json.py
@@ -183,7 +183,8 @@
         context = js['context']
         if 'label' in bm:
             labels_list = [
-                s.split(':') for s in bm['label'].strip().split(' ')
+                s.split(':')
+                for s in bm['label'].strip().split(' ')
                 if len(s) and s[0] != '#'
             ]
             for el in labels_list:
diff --git a/tools/profiling/qps/qps_diff.py b/tools/profiling/qps/qps_diff.py
index 55a81f0..393f862 100755
--- a/tools/profiling/qps/qps_diff.py
+++ b/tools/profiling/qps/qps_diff.py
@@ -48,7 +48,8 @@
         '--loops',
         type=int,
         default=4,
-        help='Number of loops for each benchmark. More loops cuts down on noise')
+        help='Number of loops for each benchmark. More loops cuts down on noise'
+    )
     argp.add_argument(
         '-j',
         '--jobs',
@@ -128,8 +129,8 @@
     rows = []
     for sn in scenarios:
         mdn_diff = abs(_median(new_data[sn]) - _median(old_data[sn]))
-        print('%s: %s=%r %s=%r mdn_diff=%r' %
-              (sn, new, new_data[sn], old, old_data[sn], mdn_diff))
+        print('%s: %s=%r %s=%r mdn_diff=%r' % (sn, new, new_data[sn], old,
+                                               old_data[sn], mdn_diff))
         s = bm_speedup.speedup(new_data[sn], old_data[sn], 10e-5)
         if abs(s) > 3 and mdn_diff > 0.5:
             rows.append([sn, '%+d%%' % s])
diff --git a/tools/run_tests/artifacts/artifact_targets.py b/tools/run_tests/artifacts/artifact_targets.py
index dc0803b..efc4ca0 100644
--- a/tools/run_tests/artifacts/artifact_targets.py
+++ b/tools/run_tests/artifacts/artifact_targets.py
@@ -271,8 +271,8 @@
     def build_jobspec(self):
         if self.platform == 'linux':
             return create_docker_jobspec(
-                self.name,
-                'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch),
+                self.name, 'tools/dockerfile/grpc_artifact_linux_{}'.format(
+                    self.arch),
                 'tools/run_tests/artifacts/build_artifact_php.sh')
         else:
             return create_jobspec(
@@ -337,36 +337,38 @@
         for Cls in (CSharpExtArtifact, ProtocArtifact)
         for platform in ('linux', 'macos', 'windows') for arch in ('x86', 'x64')
     ] + [
-        PythonArtifact('linux', 'x86', 'cp27-cp27m'), PythonArtifact(
-            'linux', 'x86', 'cp27-cp27mu'), PythonArtifact(
-                'linux', 'x86', 'cp34-cp34m'), PythonArtifact(
-                    'linux', 'x86', 'cp35-cp35m'), PythonArtifact(
-                        'linux', 'x86', 'cp36-cp36m'), PythonArtifact(
-                            'linux_extra', 'armv7', '2.7'), PythonArtifact(
-                                'linux_extra', 'armv7', '3.4'), PythonArtifact(
-                                    'linux_extra', 'armv7', '3.5'),
-        PythonArtifact('linux_extra', 'armv7', '3.6'), PythonArtifact(
-            'linux_extra', 'armv6', '2.7'), PythonArtifact(
-                'linux_extra', 'armv6', '3.4'), PythonArtifact(
-                    'linux_extra', 'armv6', '3.5'), PythonArtifact(
-                        'linux_extra', 'armv6', '3.6'), PythonArtifact(
-                            'linux', 'x64', 'cp27-cp27m'), PythonArtifact(
-                                'linux', 'x64', 'cp27-cp27mu'), PythonArtifact(
-                                    'linux', 'x64', 'cp34-cp34m'),
-        PythonArtifact('linux', 'x64', 'cp35-cp35m'), PythonArtifact(
-            'linux', 'x64', 'cp36-cp36m'), PythonArtifact(
-                'macos', 'x64', 'python2.7'), PythonArtifact(
-                    'macos', 'x64', 'python3.4'), PythonArtifact('macos', 'x64',
-                                                                 'python3.5'),
-        PythonArtifact('macos', 'x64', 'python3.6'), PythonArtifact(
-            'windows', 'x86', 'Python27_32bits'), PythonArtifact(
-                'windows', 'x86', 'Python34_32bits'), PythonArtifact(
-                    'windows', 'x86', 'Python35_32bits'), PythonArtifact(
-                        'windows', 'x86', 'Python36_32bits'), PythonArtifact(
-                            'windows', 'x64', 'Python27'),
-        PythonArtifact('windows', 'x64', 'Python34'), PythonArtifact(
-            'windows', 'x64', 'Python35'), PythonArtifact(
-                'windows', 'x64', 'Python36'), RubyArtifact(
-                    'linux', 'x64'), RubyArtifact('macos', 'x64'), PHPArtifact(
-                        'linux', 'x64'), PHPArtifact('macos', 'x64')
+        PythonArtifact('linux', 'x86', 'cp27-cp27m'),
+        PythonArtifact('linux', 'x86', 'cp27-cp27mu'),
+        PythonArtifact('linux', 'x86', 'cp34-cp34m'),
+        PythonArtifact('linux', 'x86', 'cp35-cp35m'),
+        PythonArtifact('linux', 'x86', 'cp36-cp36m'),
+        PythonArtifact('linux_extra', 'armv7', '2.7'),
+        PythonArtifact('linux_extra', 'armv7', '3.4'),
+        PythonArtifact('linux_extra', 'armv7', '3.5'),
+        PythonArtifact('linux_extra', 'armv7', '3.6'),
+        PythonArtifact('linux_extra', 'armv6', '2.7'),
+        PythonArtifact('linux_extra', 'armv6', '3.4'),
+        PythonArtifact('linux_extra', 'armv6', '3.5'),
+        PythonArtifact('linux_extra', 'armv6', '3.6'),
+        PythonArtifact('linux', 'x64', 'cp27-cp27m'),
+        PythonArtifact('linux', 'x64', 'cp27-cp27mu'),
+        PythonArtifact('linux', 'x64', 'cp34-cp34m'),
+        PythonArtifact('linux', 'x64', 'cp35-cp35m'),
+        PythonArtifact('linux', 'x64', 'cp36-cp36m'),
+        PythonArtifact('macos', 'x64', 'python2.7'),
+        PythonArtifact('macos', 'x64', 'python3.4'),
+        PythonArtifact('macos', 'x64', 'python3.5'),
+        PythonArtifact('macos', 'x64', 'python3.6'),
+        PythonArtifact('windows', 'x86', 'Python27_32bits'),
+        PythonArtifact('windows', 'x86', 'Python34_32bits'),
+        PythonArtifact('windows', 'x86', 'Python35_32bits'),
+        PythonArtifact('windows', 'x86', 'Python36_32bits'),
+        PythonArtifact('windows', 'x64', 'Python27'),
+        PythonArtifact('windows', 'x64', 'Python34'),
+        PythonArtifact('windows', 'x64', 'Python35'),
+        PythonArtifact('windows', 'x64', 'Python36'),
+        RubyArtifact('linux', 'x64'),
+        RubyArtifact('macos', 'x64'),
+        PHPArtifact('linux', 'x64'),
+        PHPArtifact('macos', 'x64')
     ])
diff --git a/tools/run_tests/artifacts/distribtest_targets.py b/tools/run_tests/artifacts/distribtest_targets.py
index 94a2d53..b2cc16a 100644
--- a/tools/run_tests/artifacts/distribtest_targets.py
+++ b/tools/run_tests/artifacts/distribtest_targets.py
@@ -106,8 +106,8 @@
         if self.platform == 'linux':
             return create_docker_jobspec(
                 self.name,
-                'tools/dockerfile/distribtest/csharp_%s_%s' % (
-                    self.docker_suffix, self.arch),
+                'tools/dockerfile/distribtest/csharp_%s_%s' %
+                (self.docker_suffix, self.arch),
                 'test/distrib/csharp/run_distrib_test%s.sh' %
                 self.script_suffix,
                 copy_rel_path='test/distrib')
@@ -260,8 +260,8 @@
     def build_jobspec(self):
         if self.platform == 'linux':
             return create_docker_jobspec(
-                self.name, 'tools/dockerfile/distribtest/cpp_%s_%s' % (
-                    self.docker_suffix, self.arch),
+                self.name, 'tools/dockerfile/distribtest/cpp_%s_%s' %
+                (self.docker_suffix, self.arch),
                 'test/distrib/cpp/run_distrib_test_%s.sh' % self.testcase)
         elif self.platform == 'windows':
             return create_jobspec(
diff --git a/tools/run_tests/artifacts/package_targets.py b/tools/run_tests/artifacts/package_targets.py
index 5290845..abf1b5e 100644
--- a/tools/run_tests/artifacts/package_targets.py
+++ b/tools/run_tests/artifacts/package_targets.py
@@ -152,6 +152,9 @@
 def targets():
     """Gets list of supported targets"""
     return [
-        CSharpPackage(), CSharpPackage(linux=True), RubyPackage(),
-        PythonPackage(), PHPPackage()
+        CSharpPackage(),
+        CSharpPackage(linux=True),
+        RubyPackage(),
+        PythonPackage(),
+        PHPPackage()
     ]
diff --git a/tools/run_tests/performance/massage_qps_stats.py b/tools/run_tests/performance/massage_qps_stats.py
index 37f6e7a..790202c 100644
--- a/tools/run_tests/performance/massage_qps_stats.py
+++ b/tools/run_tests/performance/massage_qps_stats.py
@@ -18,8 +18,7 @@
 
 
 def massage_qps_stats(scenario_result):
-    for stats in scenario_result["serverStats"] + scenario_result[
-            "clientStats"]:
+    for stats in scenario_result["serverStats"] + scenario_result["clientStats"]:
         if "coreStats" not in stats: return
         core_stats = stats["coreStats"]
         del stats["coreStats"]
@@ -294,8 +293,8 @@
                 core_stats, "cq_ev_queue_transient_pop_failures")
         h = massage_qps_stats_helpers.histogram(core_stats, "call_initial_size")
         stats["core_call_initial_size"] = ",".join("%f" % x for x in h.buckets)
-        stats["core_call_initial_size_bkts"] = ",".join("%f" % x
-                                                        for x in h.boundaries)
+        stats["core_call_initial_size_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
         stats[
             "core_call_initial_size_50p"] = massage_qps_stats_helpers.percentile(
                 h.buckets, 50, h.boundaries)
@@ -307,8 +306,8 @@
                 h.buckets, 99, h.boundaries)
         h = massage_qps_stats_helpers.histogram(core_stats,
                                                 "poll_events_returned")
-        stats["core_poll_events_returned"] = ",".join("%f" % x
-                                                      for x in h.buckets)
+        stats["core_poll_events_returned"] = ",".join(
+            "%f" % x for x in h.buckets)
         stats["core_poll_events_returned_bkts"] = ",".join(
             "%f" % x for x in h.boundaries)
         stats[
@@ -322,8 +321,8 @@
                 h.buckets, 99, h.boundaries)
         h = massage_qps_stats_helpers.histogram(core_stats, "tcp_write_size")
         stats["core_tcp_write_size"] = ",".join("%f" % x for x in h.buckets)
-        stats["core_tcp_write_size_bkts"] = ",".join("%f" % x
-                                                     for x in h.boundaries)
+        stats["core_tcp_write_size_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
         stats["core_tcp_write_size_50p"] = massage_qps_stats_helpers.percentile(
             h.buckets, 50, h.boundaries)
         stats["core_tcp_write_size_95p"] = massage_qps_stats_helpers.percentile(
@@ -333,8 +332,8 @@
         h = massage_qps_stats_helpers.histogram(core_stats,
                                                 "tcp_write_iov_size")
         stats["core_tcp_write_iov_size"] = ",".join("%f" % x for x in h.buckets)
-        stats["core_tcp_write_iov_size_bkts"] = ",".join("%f" % x
-                                                         for x in h.boundaries)
+        stats["core_tcp_write_iov_size_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
         stats[
             "core_tcp_write_iov_size_50p"] = massage_qps_stats_helpers.percentile(
                 h.buckets, 50, h.boundaries)
@@ -346,8 +345,8 @@
                 h.buckets, 99, h.boundaries)
         h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_size")
         stats["core_tcp_read_size"] = ",".join("%f" % x for x in h.buckets)
-        stats["core_tcp_read_size_bkts"] = ",".join("%f" % x
-                                                    for x in h.boundaries)
+        stats["core_tcp_read_size_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
         stats["core_tcp_read_size_50p"] = massage_qps_stats_helpers.percentile(
             h.buckets, 50, h.boundaries)
         stats["core_tcp_read_size_95p"] = massage_qps_stats_helpers.percentile(
@@ -356,8 +355,8 @@
             h.buckets, 99, h.boundaries)
         h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_offer")
         stats["core_tcp_read_offer"] = ",".join("%f" % x for x in h.buckets)
-        stats["core_tcp_read_offer_bkts"] = ",".join("%f" % x
-                                                     for x in h.boundaries)
+        stats["core_tcp_read_offer_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
         stats["core_tcp_read_offer_50p"] = massage_qps_stats_helpers.percentile(
             h.buckets, 50, h.boundaries)
         stats["core_tcp_read_offer_95p"] = massage_qps_stats_helpers.percentile(
@@ -366,8 +365,8 @@
             h.buckets, 99, h.boundaries)
         h = massage_qps_stats_helpers.histogram(core_stats,
                                                 "tcp_read_offer_iov_size")
-        stats["core_tcp_read_offer_iov_size"] = ",".join("%f" % x
-                                                         for x in h.buckets)
+        stats["core_tcp_read_offer_iov_size"] = ",".join(
+            "%f" % x for x in h.buckets)
         stats["core_tcp_read_offer_iov_size_bkts"] = ",".join(
             "%f" % x for x in h.boundaries)
         stats[
@@ -381,8 +380,8 @@
                 h.buckets, 99, h.boundaries)
         h = massage_qps_stats_helpers.histogram(core_stats,
                                                 "http2_send_message_size")
-        stats["core_http2_send_message_size"] = ",".join("%f" % x
-                                                         for x in h.buckets)
+        stats["core_http2_send_message_size"] = ",".join(
+            "%f" % x for x in h.buckets)
         stats["core_http2_send_message_size_bkts"] = ",".join(
             "%f" % x for x in h.boundaries)
         stats[
@@ -457,8 +456,8 @@
         h = massage_qps_stats_helpers.histogram(core_stats,
                                                 "server_cqs_checked")
         stats["core_server_cqs_checked"] = ",".join("%f" % x for x in h.buckets)
-        stats["core_server_cqs_checked_bkts"] = ",".join("%f" % x
-                                                         for x in h.boundaries)
+        stats["core_server_cqs_checked_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
         stats[
             "core_server_cqs_checked_50p"] = massage_qps_stats_helpers.percentile(
                 h.buckets, 50, h.boundaries)
diff --git a/tools/run_tests/performance/scenario_config.py b/tools/run_tests/performance/scenario_config.py
index 7af33f9..f057531 100644
--- a/tools/run_tests/performance/scenario_config.py
+++ b/tools/run_tests/performance/scenario_config.py
@@ -178,8 +178,8 @@
         # clamp buffer usage to something reasonable (16 gig for now)
         MAX_MEMORY_USE = 16 * 1024 * 1024 * 1024
         if outstanding_calls * max(req_size, resp_size) > MAX_MEMORY_USE:
-            outstanding_calls = max(1, MAX_MEMORY_USE / max(req_size,
-                                                            resp_size))
+            outstanding_calls = max(1,
+                                    MAX_MEMORY_USE / max(req_size, resp_size))
         wide = channels if channels is not None else WIDE
         deep = int(math.ceil(1.0 * outstanding_calls / wide))
 
@@ -503,8 +503,8 @@
             ]:
                 for synchronicity in ['sync', 'async']:
                     yield _ping_pong_scenario(
-                        'cpp_protobuf_%s_%s_ping_pong_%s' %
-                        (synchronicity, rpc_type, secstr),
+                        'cpp_protobuf_%s_%s_ping_pong_%s' % (synchronicity,
+                                                             rpc_type, secstr),
                         rpc_type=rpc_type.upper(),
                         client_type='%s_CLIENT' % synchronicity.upper(),
                         server_type='%s_SERVER' % synchronicity.upper(),
@@ -580,10 +580,10 @@
                                 minimal_stack=not secure,
                                 categories=[SWEEP])
 
-                    for channels in geometric_progression(1, 20000,
-                                                          math.sqrt(10)):
-                        for outstanding in geometric_progression(1, 200000,
-                                                                 math.sqrt(10)):
+                    for channels in geometric_progression(
+                            1, 20000, math.sqrt(10)):
+                        for outstanding in geometric_progression(
+                                1, 200000, math.sqrt(10)):
                             if synchronicity == 'sync' and outstanding > 1200:
                                 continue
                             if outstanding < channels: continue
diff --git a/tools/run_tests/python_utils/dockerjob.py b/tools/run_tests/python_utils/dockerjob.py
index d2941c0..2d22dc1 100755
--- a/tools/run_tests/python_utils/dockerjob.py
+++ b/tools/run_tests/python_utils/dockerjob.py
@@ -50,8 +50,8 @@
             return int(output.split(':', 2)[1])
         except subprocess.CalledProcessError as e:
             pass
-    raise Exception('Failed to get exposed port %s for container %s.' %
-                    (port, cid))
+    raise Exception('Failed to get exposed port %s for container %s.' % (port,
+                                                                         cid))
 
 
 def wait_for_healthy(cid, shortname, timeout_seconds):
diff --git a/tools/run_tests/python_utils/filter_pull_request_tests.py b/tools/run_tests/python_utils/filter_pull_request_tests.py
index 8e0dc70..4c09b34 100644
--- a/tools/run_tests/python_utils/filter_pull_request_tests.py
+++ b/tools/run_tests/python_utils/filter_pull_request_tests.py
@@ -124,10 +124,10 @@
   """
     # Get file changes between branch and merge-base of specified branch
     # Not combined to be Windows friendly
-    base_commit = check_output(
-        ["git", "merge-base", base_branch, "HEAD"]).rstrip()
-    return check_output(
-        ["git", "diff", base_commit, "--name-only", "HEAD"]).splitlines()
+    base_commit = check_output(["git", "merge-base", base_branch,
+                                "HEAD"]).rstrip()
+    return check_output(["git", "diff", base_commit, "--name-only",
+                         "HEAD"]).splitlines()
 
 
 def _can_skip_tests(file_names, triggers):
diff --git a/tools/run_tests/python_utils/jobset.py b/tools/run_tests/python_utils/jobset.py
index 454d09b..6a33913 100755
--- a/tools/run_tests/python_utils/jobset.py
+++ b/tools/run_tests/python_utils/jobset.py
@@ -133,12 +133,13 @@
                     logging.info(explanatory_text)
                 logging.info('%s: %s', tag, msg)
             else:
-                sys.stdout.write('%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' % (
-                    _BEGINNING_OF_LINE, _CLEAR_LINE, '\n%s' % explanatory_text
-                    if explanatory_text is not None else '',
-                    _COLORS[_TAG_COLOR[tag]][1], _COLORS[_TAG_COLOR[tag]][0],
-                    tag, msg, '\n'
-                    if do_newline or explanatory_text is not None else ''))
+                sys.stdout.write(
+                    '%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' %
+                    (_BEGINNING_OF_LINE, _CLEAR_LINE, '\n%s' % explanatory_text
+                     if explanatory_text is not None else '',
+                     _COLORS[_TAG_COLOR[tag]][1], _COLORS[_TAG_COLOR[tag]][0],
+                     tag, msg, '\n'
+                     if do_newline or explanatory_text is not None else ''))
             sys.stdout.flush()
             return
         except IOError, e:
@@ -210,8 +211,8 @@
 
     def __str__(self):
         return '%s: %s %s' % (self.shortname, ' '.join(
-            '%s=%s' % kv
-            for kv in self.environ.items()), ' '.join(self.cmdline))
+            '%s=%s' % kv for kv in self.environ.items()),
+                              ' '.join(self.cmdline))
 
 
 class JobResult(object):
@@ -284,8 +285,9 @@
                 self._process = try_start()
                 break
             except OSError:
-                message('WARNING', 'Failed to start %s, retrying in %f seconds'
-                        % (self._spec.shortname, delay))
+                message('WARNING',
+                        'Failed to start %s, retrying in %f seconds' %
+                        (self._spec.shortname, delay))
                 time.sleep(delay)
                 delay *= 2
         else:
@@ -343,8 +345,8 @@
                     if real > 0.5:
                         cores = (user + sys) / real
                         self.result.cpu_measured = float('%.01f' % cores)
-                        self.result.cpu_estimated = float('%.01f' %
-                                                          self._spec.cpu_cost)
+                        self.result.cpu_estimated = float(
+                            '%.01f' % self._spec.cpu_cost)
                         measurement = '; cpu_cost=%.01f; estimated=%.01f' % (
                             self.result.cpu_measured, self.result.cpu_estimated)
                 if not self._quiet_success:
@@ -378,8 +380,8 @@
             else:
                 message(
                     'TIMEOUT',
-                    '%s [pid=%d, time=%.1fsec]' %
-                    (self._spec.shortname, self._process.pid, elapsed),
+                    '%s [pid=%d, time=%.1fsec]' % (self._spec.shortname,
+                                                   self._process.pid, elapsed),
                     stdout(),
                     do_newline=True)
                 self.kill()
diff --git a/tools/run_tests/python_utils/start_port_server.py b/tools/run_tests/python_utils/start_port_server.py
index 5572cdc..37995ac 100644
--- a/tools/run_tests/python_utils/start_port_server.py
+++ b/tools/run_tests/python_utils/start_port_server.py
@@ -43,16 +43,16 @@
     if running:
         current_version = int(
             subprocess.check_output([
-                sys.executable, os.path.abspath(
-                    'tools/run_tests/python_utils/port_server.py'),
+                sys.executable,
+                os.path.abspath('tools/run_tests/python_utils/port_server.py'),
                 'dump_version'
             ]))
         logging.info('my port server is version %d', current_version)
         running = (version >= current_version)
         if not running:
             logging.info('port_server version mismatch: killing the old one')
-            urllib.urlopen('http://localhost:%d/quitquitquit' %
-                           _PORT_SERVER_PORT).read()
+            urllib.urlopen(
+                'http://localhost:%d/quitquitquit' % _PORT_SERVER_PORT).read()
             time.sleep(1)
     if not running:
         fd, logfile = tempfile.mkstemp()
@@ -61,7 +61,8 @@
         args = [
             sys.executable,
             os.path.abspath('tools/run_tests/python_utils/port_server.py'),
-            '-p', '%d' % _PORT_SERVER_PORT, '-l', logfile
+            '-p',
+            '%d' % _PORT_SERVER_PORT, '-l', logfile
         ]
         env = dict(os.environ)
         env['BUILD_ID'] = 'pleaseDontKillMeJenkins'
@@ -91,8 +92,8 @@
                 # try one final time: maybe another build managed to start one
                 time.sleep(1)
                 try:
-                    urllib.urlopen('http://localhost:%d/get' %
-                                   _PORT_SERVER_PORT).read()
+                    urllib.urlopen(
+                        'http://localhost:%d/get' % _PORT_SERVER_PORT).read()
                     logging.info(
                         'last ditch attempt to contact port server succeeded')
                     break
diff --git a/tools/run_tests/run_build_statistics.py b/tools/run_tests/run_build_statistics.py
index 4af00a4..4055332 100755
--- a/tools/run_tests/run_build_statistics.py
+++ b/tools/run_tests/run_build_statistics.py
@@ -61,7 +61,7 @@
 }
 _URL_BASE = 'https://grpc-testing.appspot.com/job'
 
-# This is a dynamic list where known and active issues should be added. 
+# This is a dynamic list where known and active issues should be added.
 # Fixed ones should be removed.
 # Also try not to add multiple messages from the same failure.
 _KNOWN_ERRORS = [
@@ -106,8 +106,8 @@
                 'description': known_error,
                 'count': this_error_count
             })
-            print('====> %d failures due to %s' %
-                  (this_error_count, known_error))
+            print('====> %d failures due to %s' % (this_error_count,
+                                                   known_error))
     return error_list
 
 
@@ -116,8 +116,9 @@
 
 
 def _get_last_processed_buildnumber(build_name):
-    query = 'SELECT max(build_number) FROM [%s:%s.%s];' % (
-        _PROJECT_ID, _DATASET_ID, build_name)
+    query = 'SELECT max(build_number) FROM [%s:%s.%s];' % (_PROJECT_ID,
+                                                           _DATASET_ID,
+                                                           build_name)
     query_job = big_query_utils.sync_query_job(bq, _PROJECT_ID, query)
     page = bq.jobs().getQueryResults(
         pageToken=None, **query_job['jobReference']).execute(num_retries=3)
@@ -167,8 +168,8 @@
         html = urllib.urlopen(console_url).read()
         build_result['pass_count'] = 0
         build_result['failure_count'] = 1
-        # In this case, the string doesn't exist in the result html but the fact 
-        # that we fail to parse the result html indicates Jenkins failure and hence 
+        # In this case, the string doesn't exist in the result html but the fact
+        # that we fail to parse the result html indicates Jenkins failure and hence
         # no report files were generated.
         build_result['no_report_files_found'] = True
         error_list = _scrape_for_known_errors(html)
@@ -223,7 +224,7 @@
             if build.get_status() == 'ABORTED':
                 continue
             # If any build is still running, stop processing this job. Next time, we
-            # start from where it was left so that all builds are processed 
+            # start from where it was left so that all builds are processed
             # sequentially.
             if build.is_running():
                 print('====> Build %d is still running.' % build_number)
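
Several hunks in run_build_statistics.py change nothing but trailing whitespace; the '-'/'+' comment pairs there differ only by an invisible space at the end of the removed line. The _get_last_processed_buildnumber hunk also shows how yapf 0.20.0 stacks the elements of a '%' tuple vertically, aligned under the tuple's opening parenthesis, once the one-line form overflows. A small runnable sketch, with placeholder values standing in for the module's constants:

    # Placeholder values; the real constants are module-level in the script.
    _PROJECT_ID, _DATASET_ID, build_name = 'grpc-testing', 'jenkins', 'linux'

    # Each tuple element is aligned under the opening parenthesis:
    query = 'SELECT max(build_number) FROM [%s:%s.%s];' % (_PROJECT_ID,
                                                           _DATASET_ID,
                                                           build_name)
    print(query)
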
diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py
index 99f4298..44a6ec2 100755
--- a/tools/run_tests/run_interop_tests.py
+++ b/tools/run_tests/run_interop_tests.py
@@ -685,7 +685,8 @@
     cmdargs = [
         '--server_host=%s' % server_host_detail[0],
         '--server_host_override=%s' % server_host_detail[1],
-        '--server_port=443', '--use_tls=true', '--test_case=%s' % test_case
+        '--server_port=443', '--use_tls=true',
+        '--test_case=%s' % test_case
     ]
     environ = dict(language.cloud_to_prod_env(), **language.global_env())
     if auth:
@@ -696,18 +697,19 @@
     cwd = language.client_cwd
 
     if docker_image:
-        container_name = dockerjob.random_name('interop_client_%s' %
-                                               language.safename)
+        container_name = dockerjob.random_name(
+            'interop_client_%s' % language.safename)
         cmdline = docker_run_cmdline(
             cmdline,
             image=docker_image,
             cwd=cwd,
             environ=environ,
-            docker_args=['--net=host', '--name=%s' % container_name])
+            docker_args=['--net=host',
+                         '--name=%s' % container_name])
         if manual_cmd_log is not None:
             if manual_cmd_log == []:
-                manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' %
-                                      docker_image)
+                manual_cmd_log.append(
+                    'echo "Testing ${docker_image:=%s}"' % docker_image)
             manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
         cwd = None
         environ = None
@@ -775,18 +777,19 @@
     environ = language.global_env()
     if docker_image and language.safename != 'objc':
         # we can't run client in docker for objc.
-        container_name = dockerjob.random_name('interop_client_%s' %
-                                               language.safename)
+        container_name = dockerjob.random_name(
+            'interop_client_%s' % language.safename)
         cmdline = docker_run_cmdline(
             cmdline,
             image=docker_image,
             environ=environ,
             cwd=cwd,
-            docker_args=['--net=host', '--name=%s' % container_name])
+            docker_args=['--net=host',
+                         '--name=%s' % container_name])
         if manual_cmd_log is not None:
             if manual_cmd_log == []:
-                manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' %
-                                      docker_image)
+                manual_cmd_log.append(
+                    'echo "Testing ${docker_image:=%s}"' % docker_image)
             manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
         cwd = None
 
@@ -807,12 +810,12 @@
 
 def server_jobspec(language, docker_image, insecure=False, manual_cmd_log=None):
     """Create jobspec for running a server"""
-    container_name = dockerjob.random_name('interop_server_%s' %
-                                           language.safename)
+    container_name = dockerjob.random_name(
+        'interop_server_%s' % language.safename)
     cmdline = bash_cmdline(
         language.server_cmd([
-            '--port=%s' % _DEFAULT_SERVER_PORT, '--use_tls=%s' % (
-                'false' if insecure else 'true')
+            '--port=%s' % _DEFAULT_SERVER_PORT,
+            '--use_tls=%s' % ('false' if insecure else 'true')
         ]))
     environ = language.global_env()
     docker_args = ['--name=%s' % container_name]
@@ -821,9 +824,9 @@
         # with the server port. These ports are used for http2 interop test
         # (one test case per port).
         docker_args += list(
-            itertools.chain.from_iterable(('-p', str(_DEFAULT_SERVER_PORT + i))
-                                          for i in range(
-                                              len(_HTTP2_SERVER_TEST_CASES))))
+            itertools.chain.from_iterable(
+                ('-p', str(_DEFAULT_SERVER_PORT + i))
+                for i in range(len(_HTTP2_SERVER_TEST_CASES))))
         # Enable docker's healthcheck mechanism.
         # This runs a Python script inside the container every second. The script
         # pings the http2 server to verify it is ready. The 'health-retries' flag
@@ -834,8 +837,8 @@
         # command line.
         docker_args += [
             '--health-cmd=python test/http2_test/http2_server_health_check.py '
-            '--server_host=%s --server_port=%d' %
-            ('localhost', _DEFAULT_SERVER_PORT),
+            '--server_host=%s --server_port=%d' % ('localhost',
+                                                   _DEFAULT_SERVER_PORT),
             '--health-interval=1s',
             '--health-retries=5',
             '--health-timeout=10s',
@@ -852,8 +855,8 @@
         docker_args=docker_args)
     if manual_cmd_log is not None:
         if manual_cmd_log == []:
-            manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' %
-                                  docker_image)
+            manual_cmd_log.append(
+                'echo "Testing ${docker_image:=%s}"' % docker_image)
         manual_cmd_log.append(manual_cmdline(docker_cmdline, docker_image))
     server_job = jobset.JobSpec(
         cmdline=docker_cmdline,
@@ -974,7 +977,8 @@
     '--override_server',
     action='append',
     type=lambda kv: kv.split('='),
-    help='Use servername=HOST:PORT to explicitly specify a server. E.g. csharp=localhost:50000',
+    help=
+    'Use servername=HOST:PORT to explicitly specify a server. E.g. csharp=localhost:50000',
     default=[])
 argp.add_argument(
     '-t', '--travis', default=False, action='store_const', const=True)
@@ -993,7 +997,8 @@
     default=False,
     action='store_const',
     const=True,
-    help='Allow flaky tests to show as passing (re-runs failed tests up to five times)'
+    help=
+    'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
 )
 argp.add_argument(
     '--manual_run',
@@ -1014,7 +1019,8 @@
     default=False,
     action='store_const',
     const=True,
-    help='Enable HTTP/2 server edge case testing. (Includes positive and negative tests'
+    help=
+    'Enable HTTP/2 server edge case testing. (Includes positive and negative tests'
 )
 argp.add_argument(
     '--insecure',
@@ -1039,8 +1045,8 @@
 
 servers = set(
     s
-    for s in itertools.chain.from_iterable(_SERVERS if x == 'all' else [x]
-                                           for x in args.server))
+    for s in itertools.chain.from_iterable(
+        _SERVERS if x == 'all' else [x] for x in args.server))
 
 if args.use_docker:
     if not args.travis:
@@ -1067,10 +1073,9 @@
 # we want to include everything but objc in 'all'
 # because objc won't run on non-mac platforms
 all_but_objc = set(six.iterkeys(_LANGUAGES)) - set(['objc'])
-languages = set(
-    _LANGUAGES[l]
-    for l in itertools.chain.from_iterable(all_but_objc if x == 'all' else [x]
-                                           for x in args.language))
+languages = set(_LANGUAGES[l]
+                for l in itertools.chain.from_iterable(
+                    all_but_objc if x == 'all' else [x] for x in args.language))
 
 languages_http2_clients_for_http2_server_interop = set()
 if args.http2_server_interop:
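
The argparse hunks in run_interop_tests.py introduce another pattern: when a keyword argument's string value cannot fit even on its own continuation line, yapf 0.20.0 leaves a bare 'help=' at the end of one line and starts the value on the next. A runnable sketch reduced to one option from the diff (the type= keyword and the surrounding options are omitted here):

    import argparse

    argp = argparse.ArgumentParser()
    # The bare 'help=' stays on its own line because the string is too long
    # to fit beside it within the configured column limit.
    argp.add_argument(
        '--override_server',
        action='append',
        help=
        'Use servername=HOST:PORT to explicitly specify a server. E.g. csharp=localhost:50000',
        default=[])
    print(argp.parse_args([]).override_server)
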
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index 561217c..4e4d05c 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -92,8 +92,9 @@
         benchmarks.append(
             jobset.JobSpec(
                 [
-                    'bins/basicprof/%s' % bm_name, '--benchmark_filter=^%s$' %
-                    line, '--benchmark_min_time=0.05'
+                    'bins/basicprof/%s' % bm_name,
+                    '--benchmark_filter=^%s$' % line,
+                    '--benchmark_min_time=0.05'
                 ],
                 environ={'LATENCY_TRACE': '%s.trace' % fnize(line)},
                 shortname='profile-%s' % fnize(line)))
@@ -102,8 +103,9 @@
                 [
                     sys.executable,
                     'tools/profiling/latency_profile/profile_analyzer.py',
-                    '--source', '%s.trace' % fnize(line), '--fmt', 'simple',
-                    '--out', 'reports/%s.txt' % fnize(line)
+                    '--source',
+                    '%s.trace' % fnize(line), '--fmt', 'simple', '--out',
+                    'reports/%s.txt' % fnize(line)
                 ],
                 timeout_seconds=20 * 60,
                 shortname='analyze-%s' % fnize(line)))
@@ -116,7 +118,8 @@
             # run up to half the cpu count: each benchmark can use up to two cores
             # (one for the microbenchmark, one for the data flush)
             jobset.run(
-                benchmarks, maxjobs=max(1, multiprocessing.cpu_count() / 2))
+                benchmarks, maxjobs=max(1,
+                                        multiprocessing.cpu_count() / 2))
             jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
             jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
             benchmarks = []
@@ -145,8 +148,9 @@
         benchmarks.append(
             jobset.JobSpec(
                 [
-                    'perf', 'record', '-o', '%s-perf.data' % fnize(
-                        line), '-g', '-F', '997', 'bins/mutrace/%s' % bm_name,
+                    'perf', 'record', '-o',
+                    '%s-perf.data' % fnize(line), '-g', '-F', '997',
+                    'bins/mutrace/%s' % bm_name,
                     '--benchmark_filter=^%s$' % line, '--benchmark_min_time=10'
                 ],
                 shortname='perf-%s' % fnize(line)))
@@ -183,12 +187,14 @@
 
 def run_summary(bm_name, cfg, base_json_name):
     subprocess.check_call([
-        'make', bm_name, 'CONFIG=%s' % cfg, '-j',
+        'make', bm_name,
+        'CONFIG=%s' % cfg, '-j',
         '%d' % multiprocessing.cpu_count()
     ])
     cmd = [
-        'bins/%s/%s' % (cfg, bm_name), '--benchmark_out=%s.%s.json' %
-        (base_json_name, cfg), '--benchmark_out_format=json'
+        'bins/%s/%s' % (cfg, bm_name),
+        '--benchmark_out=%s.%s.json' % (base_json_name, cfg),
+        '--benchmark_out_format=json'
     ]
     if args.summary_time is not None:
         cmd += ['--benchmark_min_time=%d' % args.summary_time]
@@ -205,10 +211,12 @@
             f.write(
                 subprocess.check_output([
                     'tools/profiling/microbenchmarks/bm2bq.py',
-                    '%s.counters.json' % bm_name, '%s.opt.json' % bm_name
+                    '%s.counters.json' % bm_name,
+                    '%s.opt.json' % bm_name
                 ]))
         subprocess.check_call([
-            'bq', 'load', 'microbenchmarks.microbenchmarks', '%s.csv' % bm_name
+            'bq', 'load', 'microbenchmarks.microbenchmarks',
+            '%s.csv' % bm_name
         ])
 
 
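The command lists built for subprocess calls in run_microbenchmark.py get the element-per-line treatment whenever a '%'-formatted element forces a split; note in the 'make' and profile_analyzer hunks that short plain literals such as '-j' or '--fmt' still share a line where they fit. A runnable sketch with illustrative values in place of the script's arguments:

    import multiprocessing

    bm_name, cfg = 'bm_closure', 'opt'  # illustrative values

    # Formatted elements get their own lines; short literals are packed.
    cmd = [
        'make', bm_name,
        'CONFIG=%s' % cfg, '-j',
        '%d' % multiprocessing.cpu_count()
    ]
    print(cmd)
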
diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
index 03b684b..9a9f74e 100755
--- a/tools/run_tests/run_performance_tests.py
+++ b/tools/run_tests/run_performance_tests.py
@@ -100,7 +100,8 @@
     jobspec = jobset.JobSpec(
         cmdline=cmdline,
         shortname=shortname,
-        timeout_seconds=worker_timeout,  # workers get restarted after each scenario
+        timeout_seconds=
+        worker_timeout,  # workers get restarted after each scenario
         verbose_success=True)
     return QpsWorkerJob(jobspec, language, host_and_port, perf_file_base_name)
 
@@ -298,7 +299,8 @@
             port=worker[1] + language.worker_port_offset(),
             remote_host=worker[0],
             perf_cmd=perf_cmd)
-        for language in languages for worker_idx, worker in enumerate(workers)
+        for language in languages
+        for worker_idx, worker in enumerate(workers)
     ]
 
 
@@ -367,10 +369,10 @@
                     workers = workers_by_lang[str(language)][:]
                     # 'SERVER_LANGUAGE' is an indicator for this script to pick
                     # a server in different language.
-                    custom_server_lang = scenario_json.get('SERVER_LANGUAGE',
-                                                           None)
-                    custom_client_lang = scenario_json.get('CLIENT_LANGUAGE',
-                                                           None)
+                    custom_server_lang = scenario_json.get(
+                        'SERVER_LANGUAGE', None)
+                    custom_client_lang = scenario_json.get(
+                        'CLIENT_LANGUAGE', None)
                     scenario_json = scenario_config.remove_nonproto_fields(
                         scenario_json)
                     if custom_server_lang and custom_client_lang:
@@ -480,8 +482,8 @@
     argp.add_argument(
         '--remote_driver_host',
         default=None,
-        help='Run QPS driver on given host. By default, QPS driver is run locally.'
-    )
+        help=
+        'Run QPS driver on given host. By default, QPS driver is run locally.')
     argp.add_argument(
         '--remote_worker_host',
         nargs='+',
@@ -560,7 +562,8 @@
         '--flame_graph_reports',
         default='perf_reports',
         type=str,
-        help='Name of directory to output flame graph profiles to, if any are created.'
+        help=
+        'Name of directory to output flame graph profiles to, if any are created.'
     )
     argp.add_argument(
         '-u',
@@ -662,15 +665,16 @@
                         six.iteritems(resultset)))
             finally:
                 # Consider qps workers that need to be killed as failures
-                qps_workers_killed += finish_qps_workers(scenario.workers,
-                                                         qpsworker_jobs)
+                qps_workers_killed += finish_qps_workers(
+                    scenario.workers, qpsworker_jobs)
 
             if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs:
                 workers_and_base_names = {}
                 for worker in scenario.workers:
                     if not worker.perf_file_base_name:
                         raise Exception(
-                            'using perf but perf report filename is unspecified')
+                            'using perf but perf report filename is unspecified'
+                        )
                     workers_and_base_names[
                         worker.host_and_port] = worker.perf_file_base_name
                 perf_report_failures += run_collect_perf_profile_jobs(
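
The timeout_seconds hunk in run_performance_tests.py shows the keyword dedent triggered by an end-of-line comment: the value together with its trailing comment would overflow the column limit, so yapf 0.20.0 pushes both onto the following line. A sketch under the assumption that jobset.JobSpec merely collects keyword arguments; the stand-in below is not the real class:

    worker_timeout = 3 * 60  # illustrative value


    def job_spec(**kwargs):
        # Stand-in for jobset.JobSpec; it only echoes its keywords.
        return kwargs


    jobspec = job_spec(
        shortname='qps_worker_localhost',
        timeout_seconds=
        worker_timeout,  # workers get restarted after each scenario
        verbose_success=True)
    print(jobspec)
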
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 3aa9eb8..c8e917f 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -182,15 +182,15 @@
         js = json.load(f)
         return [
             tgt for tgt in js
-            if tgt['language'] == test_lang and platform_string() in tgt[
-                platforms_str] and not (travis and tgt['flaky'])
+            if tgt['language'] == test_lang and platform_string() in
+            tgt[platforms_str] and not (travis and tgt['flaky'])
         ]
 
 
 def _check_compiler(compiler, supported_compilers):
     if compiler not in supported_compilers:
-        raise Exception('Compiler %s not supported (on this platform).' %
-                        compiler)
+        raise Exception(
+            'Compiler %s not supported (on this platform).' % compiler)
 
 
 def _check_arch(arch, supported_archs):
@@ -263,9 +263,9 @@
         self.config = config
         self.args = args
         if self.platform == 'windows':
-            _check_compiler(self.args.compiler, [
-                'default', 'cmake', 'cmake_vs2015', 'cmake_vs2017'
-            ])
+            _check_compiler(
+                self.args.compiler,
+                ['default', 'cmake', 'cmake_vs2015', 'cmake_vs2017'])
             _check_arch(self.args.arch, ['default', 'x64', 'x86'])
             self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
             self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
@@ -305,9 +305,9 @@
                 # cmake doesn't build boringssl tests
                 continue
             auto_timeout_scaling = target.get('auto_timeout_scaling', True)
-            polling_strategies = (
-                _POLLING_STRATEGIES.get(self.platform, ['all'])
-                if target.get('uses_polling', True) else ['none'])
+            polling_strategies = (_POLLING_STRATEGIES.get(
+                self.platform, ['all']) if target.get('uses_polling', True) else
+                                  ['none'])
             if self.args.iomgr_platform == 'uv':
                 polling_strategies = ['all']
             for polling_strategy in polling_strategies:
@@ -377,7 +377,8 @@
                         for line in tests.split('\n'):
                             test = line.strip()
                             if not test: continue
-                            cmdline = [binary, '--benchmark_filter=%s$' % test
+                            cmdline = [binary,
+                                       '--benchmark_filter=%s$' % test
                                       ] + target['args']
                             out.append(
                                 self.config.job_spec(
@@ -408,7 +409,8 @@
                                 assert base is not None
                                 assert line[1] == ' '
                                 test = base + line.strip()
-                                cmdline = [binary, '--gtest_filter=%s' % test
+                                cmdline = [binary,
+                                           '--gtest_filter=%s' % test
                                           ] + target['args']
                                 out.append(
                                     self.config.job_spec(
@@ -445,8 +447,8 @@
             # don't build tools on windows just yet
             return ['buildtests_%s' % self.make_target]
         return [
-            'buildtests_%s' % self.make_target, 'tools_%s' % self.make_target,
-            'check_epollexclusive'
+            'buildtests_%s' % self.make_target,
+            'tools_%s' % self.make_target, 'check_epollexclusive'
         ]
 
     def make_options(self):
@@ -480,14 +482,18 @@
 
     def _clang_make_options(self, version_suffix=''):
         return [
-            'CC=clang%s' % version_suffix, 'CXX=clang++%s' % version_suffix,
-            'LD=clang%s' % version_suffix, 'LDXX=clang++%s' % version_suffix
+            'CC=clang%s' % version_suffix,
+            'CXX=clang++%s' % version_suffix,
+            'LD=clang%s' % version_suffix,
+            'LDXX=clang++%s' % version_suffix
         ]
 
     def _gcc_make_options(self, version_suffix):
         return [
-            'CC=gcc%s' % version_suffix, 'CXX=g++%s' % version_suffix,
-            'LD=gcc%s' % version_suffix, 'LDXX=g++%s' % version_suffix
+            'CC=gcc%s' % version_suffix,
+            'CXX=g++%s' % version_suffix,
+            'LD=gcc%s' % version_suffix,
+            'LDXX=g++%s' % version_suffix
         ]
 
     def _compiler_options(self, use_docker, compiler):
@@ -700,8 +706,8 @@
                 environ=dict(
                     list(environment.items()) + [(
                         'GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
-                shortname='%s.test.%s' % (config.name, suite_name),)
-            for suite_name in tests_json for config in self.pythons
+                shortname='%s.test.%s' % (config.name, suite_name),
+            ) for suite_name in tests_json for config in self.pythons
         ]
 
     def pre_build_steps(self):
@@ -801,7 +807,10 @@
             if os.name == 'nt':
                 return (python35_config,)
             else:
-                return (python27_config, python34_config,)
+                return (
+                    python27_config,
+                    python34_config,
+                )
         elif args.compiler == 'python2.7':
             return (python27_config,)
         elif args.compiler == 'python3.4':
@@ -817,8 +826,12 @@
         elif args.compiler == 'python_alpine':
             return (python27_config,)
         elif args.compiler == 'all_the_cpythons':
-            return (python27_config, python34_config, python35_config,
-                    python36_config,)
+            return (
+                python27_config,
+                python34_config,
+                python35_config,
+                python36_config,
+            )
         else:
             raise Exception('Compiler %s not supported.' % args.compiler)
 
@@ -921,13 +934,15 @@
 
         specs = []
         for assembly in six.iterkeys(tests_by_assembly):
-            assembly_file = 'src/csharp/%s/%s/%s%s' % (
-                assembly, assembly_subdir, assembly, assembly_extension)
+            assembly_file = 'src/csharp/%s/%s/%s%s' % (assembly,
+                                                       assembly_subdir,
+                                                       assembly,
+                                                       assembly_extension)
             if self.config.build_config != 'gcov' or self.platform != 'windows':
                 # normally, run each test as a separate process
                 for test in tests_by_assembly[assembly]:
-                    cmdline = runtime_cmd + [assembly_file, '--test=%s' % test
-                                            ] + nunit_args
+                    cmdline = runtime_cmd + [assembly_file,
+                                             '--test=%s' % test] + nunit_args
                     specs.append(
                         self.config.job_spec(
                             cmdline,
@@ -1147,8 +1162,8 @@
 
 # different configurations we can run under
 with open('tools/run_tests/generated/configs.json') as f:
-    _CONFIGS = dict((cfg['config'], Config(**cfg))
-                    for cfg in ast.literal_eval(f.read()))
+    _CONFIGS = dict(
+        (cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))
 
 _LANGUAGES = {
     'c++': CLanguage('cxx', 'c++'),
@@ -1298,13 +1313,15 @@
     default=False,
     action='store_const',
     const=True,
-    help='Allow flaky tests to show as passing (re-runs failed tests up to five times)'
+    help=
+    'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
 )
 argp.add_argument(
     '--arch',
     choices=['default', 'x86', 'x64'],
     default='default',
-    help='Selects architecture to target. For some platforms "default" is the only supported choice.'
+    help=
+    'Selects architecture to target. For some platforms "default" is the only supported choice.'
 )
 argp.add_argument(
     '--compiler',
@@ -1316,7 +1333,8 @@
         'cmake_vs2015', 'cmake_vs2017'
     ],
     default='default',
-    help='Selects compiler to use. Allowed values depend on the platform and language.'
+    help=
+    'Selects compiler to use. Allowed values depend on the platform and language.'
 )
 argp.add_argument(
     '--iomgr_platform',
@@ -1339,7 +1357,8 @@
     '--update_submodules',
     default=[],
     nargs='*',
-    help='Update some submodules before building. If any are updated, also run generate_projects. '
+    help=
+    'Update some submodules before building. If any are updated, also run generate_projects. '
     +
     'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
 )
@@ -1360,7 +1379,8 @@
     default=False,
     action='store_const',
     const=True,
-    help='Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
+    help=
+    'Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
     + 'Useful when running many iterations of each test (argument -n).')
 argp.add_argument(
     '--force_default_poller',
@@ -1399,8 +1419,8 @@
             if test.flaky: flaky_tests.add(test.name)
             if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
     except:
-        print("Unexpected error getting flaky tests: %s" %
-              traceback.format_exc())
+        print(
+            "Unexpected error getting flaky tests: %s" % traceback.format_exc())
 
 if args.force_default_poller:
     _POLLING_STRATEGIES = {}
@@ -1473,7 +1493,8 @@
         language_make_options = list(
             set([
                 make_option
-                for lang in languages for make_option in lang.make_options()
+                for lang in languages
+                for make_option in lang.make_options()
             ]))
 
 if args.use_docker:
@@ -1530,8 +1551,8 @@
         return [
             jobset.JobSpec(
                 [
-                    'cmake', '--build', '.', '--target', '%s' % target,
-                    '--config', _MSBUILD_CONFIG[cfg]
+                    'cmake', '--build', '.', '--target',
+                    '%s' % target, '--config', _MSBUILD_CONFIG[cfg]
                 ],
                 cwd=os.path.dirname(makefile),
                 timeout_seconds=None) for target in targets
@@ -1541,8 +1562,8 @@
             # With cmake, we've passed all the build configuration in the pre-build step already
             return [
                 jobset.JobSpec(
-                    [os.getenv('MAKE', 'make'), '-j', '%d' % args.jobs] +
-                    targets,
+                    [os.getenv('MAKE', 'make'), '-j',
+                     '%d' % args.jobs] + targets,
                     cwd='cmake/build',
                     timeout_seconds=None)
             ]
@@ -1550,10 +1571,11 @@
             return [
                 jobset.JobSpec(
                     [
-                        os.getenv('MAKE', 'make'), '-f', makefile, '-j', '%d' %
-                        args.jobs,
+                        os.getenv('MAKE', 'make'), '-f', makefile, '-j',
+                        '%d' % args.jobs,
                         'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' %
-                        args.slowdown, 'CONFIG=%s' % cfg, 'Q='
+                        args.slowdown,
+                        'CONFIG=%s' % cfg, 'Q='
                     ] + language_make_options +
                     ([] if not args.travis else ['JENKINS_BUILD=1']) + targets,
                     timeout_seconds=None)
@@ -1565,8 +1587,8 @@
 make_targets = {}
 for l in languages:
     makefile = l.makefile_name()
-    make_targets[makefile] = make_targets.get(
-        makefile, set()).union(set(l.make_targets()))
+    make_targets[makefile] = make_targets.get(makefile, set()).union(
+        set(l.make_targets()))
 
 
 def build_step_environ(cfg):
@@ -1581,7 +1603,8 @@
     set(
         jobset.JobSpec(
             cmdline, environ=build_step_environ(build_config), flake_retries=2)
-        for l in languages for cmdline in l.pre_build_steps()))
+        for l in languages
+        for cmdline in l.pre_build_steps()))
 if make_targets:
     make_commands = itertools.chain.from_iterable(
         make_jobspec(build_config, list(targets), makefile)
@@ -1593,12 +1616,14 @@
             cmdline,
             environ=build_step_environ(build_config),
             timeout_seconds=None)
-        for l in languages for cmdline in l.build_steps()))
+        for l in languages
+        for cmdline in l.build_steps()))
 
 post_tests_steps = list(
     set(
         jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
-        for l in languages for cmdline in l.post_tests_steps()))
+        for l in languages
+        for cmdline in l.post_tests_steps()))
 runs_per_test = args.runs_per_test
 forever = args.forever
 
@@ -1612,8 +1637,8 @@
     except:
         pass
     else:
-        urllib.request.urlopen('http://localhost:%d/quitquitquit' %
-                               legacy_server_port).read()
+        urllib.request.urlopen(
+            'http://localhost:%d/quitquitquit' % legacy_server_port).read()
 
 
 def _calculate_num_runs_failures(list_of_results):
@@ -1679,8 +1704,8 @@
         return []
 
     if not args.travis and not _has_epollexclusive() and platform_string(
-    ) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[
-            platform_string()]:
+    ) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[platform_string(
+    )]:
         print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
         _POLLING_STRATEGIES[platform_string()].remove('epollex')
 
@@ -1694,12 +1719,11 @@
     num_test_failures = 0
     try:
         infinite_runs = runs_per_test == 0
-        one_run = set(spec
-                      for language in languages
-                      for spec in language.test_specs()
-                      if (re.search(args.regex, spec.shortname) and (
-                          args.regex_exclude == '' or not re.search(
-                              args.regex_exclude, spec.shortname))))
+        one_run = set(
+            spec for language in languages for spec in language.test_specs()
+            if (re.search(args.regex, spec.shortname) and
+                (args.regex_exclude == '' or
+                 not re.search(args.regex_exclude, spec.shortname))))
         # When running on travis, we want our test runs to be as similar as possible
         # for reproducibility purposes.
         if args.travis and args.max_time <= 0:
@@ -1722,8 +1746,9 @@
         if infinite_runs:
             assert len(massaged_one_run
                       ) > 0, 'Must have at least one test for a -n inf run'
-        runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
-                         else itertools.repeat(massaged_one_run, runs_per_test))
+        runs_sequence = (itertools.repeat(massaged_one_run)
+                         if infinite_runs else itertools.repeat(
+                             massaged_one_run, runs_per_test))
         all_runs = itertools.chain.from_iterable(runs_sequence)
 
         if args.quiet_success:
@@ -1750,8 +1775,8 @@
                     else:
                         jobset.message(
                             'FLAKE',
-                            '%s [%d/%d runs flaked]' %
-                            (k, num_failures, num_runs),
+                            '%s [%d/%d runs flaked]' % (k, num_failures,
+                                                        num_runs),
                             do_newline=True)
     finally:
         for antagonist in antagonists:
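
run_tests.py collects most of the patterns already shown; the one new rule worth calling out is comprehension splitting, where yapf 0.20.0 now gives every 'for' clause its own line instead of packing two clauses onto one. A runnable sketch shaped like the pre-build step collection above, using stand-in data and tuples instead of JobSpec objects so the set stays hashable:

    languages = ['c++', 'python']  # illustrative stand-ins


    def pre_build_steps(lang):
        # Stand-in: one command line per language.
        return [['make', 'prebuild_%s' % lang]]


    # Every 'for' clause of the comprehension lands on its own line:
    pre_build_jobs = list(
        set(
            tuple(cmdline)
            for l in languages
            for cmdline in pre_build_steps(l)))
    print(sorted(pre_build_jobs))
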
diff --git a/tools/run_tests/run_tests_matrix.py b/tools/run_tests/run_tests_matrix.py
index 49be8f1..ac90bef 100755
--- a/tools/run_tests/run_tests_matrix.py
+++ b/tools/run_tests/run_tests_matrix.py
@@ -65,8 +65,10 @@
     test_job = jobset.JobSpec(
         cmdline=[
             'python', 'tools/run_tests/run_tests.py', '--use_docker', '-t',
-            '-j', str(inner_jobs), '-x', _report_filename(name),
-            '--report_suite_name', '%s' % name
+            '-j',
+            str(inner_jobs), '-x',
+            _report_filename(name), '--report_suite_name',
+            '%s' % name
         ] + runtests_args,
         environ=runtests_envs,
         shortname='run_tests_%s' % name,
@@ -90,8 +92,10 @@
     test_job = jobset.JobSpec(
         cmdline=[
             'bash', 'tools/run_tests/helper_scripts/run_tests_in_workspace.sh',
-            '-t', '-j', str(inner_jobs), '-x', '../%s' % _report_filename(name),
-            '--report_suite_name', '%s' % name
+            '-t', '-j',
+            str(inner_jobs), '-x',
+            '../%s' % _report_filename(name), '--report_suite_name',
+            '%s' % name
         ] + runtests_args,
         environ=env,
         shortname='run_tests_%s' % name,
@@ -492,8 +496,8 @@
 
     jobs = []
     for job in all_jobs:
-        if not args.filter or all(filter in job.labels
-                                  for filter in args.filter):
+        if not args.filter or all(
+                filter in job.labels for filter in args.filter):
             if not any(exclude_label in job.labels
                        for exclude_label in args.exclude):
                 jobs.append(job)
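
The filter hunk in run_tests_matrix.py shows one last call-argument rule: an over-long generator argument moves inside the call's parentheses with a double indent instead of being aligned under the function name. A runnable sketch with illustrative labels (the 'filter' variable shadows the builtin, exactly as in the original):

    args_filter = ['linux', 'corelang']  # illustrative values
    job_labels = ['linux', 'corelang', 'c']

    # The generator argument wraps inside all(...) with an extra indent:
    if not args_filter or all(
            filter in job_labels for filter in args_filter):
        print('job kept')
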
diff --git a/tools/run_tests/sanity/check_test_filtering.py b/tools/run_tests/sanity/check_test_filtering.py
index c2a6399..ebbb1a9 100755
--- a/tools/run_tests/sanity/check_test_filtering.py
+++ b/tools/run_tests/sanity/check_test_filtering.py
@@ -80,7 +80,8 @@
                 if (label in job.labels):
                     jobs_matching_labels += 1
         self.assertEquals(
-            len(filtered_jobs), len(all_jobs) - jobs_matching_labels)
+            len(filtered_jobs),
+            len(all_jobs) - jobs_matching_labels)
 
     def test_individual_language_filters(self):
         # Changing unlisted file should trigger all languages