Commit 94b13bbae90bfb94204b8fe9c531bc163e746a9f

Authored by Masahiro Yamada
Committed by Tom Rini
1 parent 56d1dded62

host-tools: use python2 explicitly for shebang

All of these host tools are apparently written for Python2,
not Python3.

Use 'python2' in the shebang line according to PEP 394
(https://www.python.org/dev/peps/pep-0394/).

Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
Reviewed-by: Simon Glass <sjg@chromium.org>
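
Several of the scripts touched here depend on syntax that only Python 2
accepts (the 'print' statement and its '>>stream' redirection form, both
visible in the diffs below), so a shebang that resolves to Python 3 would
fail with a SyntaxError. A minimal illustration, not part of the commit:

    import sys

    # Valid only under Python 2: 'print' is a statement, and the '>>' form
    # redirects it to another stream. Python 3 rejects both lines outright.
    print 'hello from python2'
    print >> sys.stderr, 'a diagnostic message'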

Showing 8 changed files with 8 additions and 8 deletions

scripts/dtc/pylibfdt/setup.py
-#!/usr/bin/env python
+#!/usr/bin/env python2

 """
 setup.py file for SWIG libfdt
 Copyright (C) 2017 Google, Inc.
 Written by Simon Glass <sjg@chromium.org>

 SPDX-License-Identifier: GPL-2.0+ BSD-2-Clause

 Files to be built into the extension are provided in SOURCES
 C flags to use are provided in CPPFLAGS
 Object file directory is provided in OBJDIR
 Version is provided in VERSION

 If these variables are not given they are parsed from the Makefiles. This
 allows this script to be run stand-alone, e.g.:

     ./pylibfdt/setup.py install [--prefix=...]
 """

 from distutils.core import setup, Extension
 import os
 import re
 import sys

 # Decodes a Makefile assignment line into key and value (and plus for +=)
 RE_KEY_VALUE = re.compile('(?P<key>\w+) *(?P<plus>[+])?= *(?P<value>.*)$')


 def ParseMakefile(fname):
     """Parse a Makefile to obtain its variables.

     This collects variable assignments of the form:

         VAR = value
         VAR += more

     It does not pick out := assignments, as these are not needed here. It
     does handle line continuation.

     Returns a dict:
         key: Variable name (e.g. 'VAR')
         value: Variable value (e.g. 'value more')
     """
     makevars = {}
     with open(fname) as fd:
         prev_text = ''  # Continuation text from previous line(s)
         for line in fd.read().splitlines():
             if line and line[-1] == '\\':  # Deal with line continuation
                 prev_text += line[:-1]
                 continue
             elif prev_text:
                 line = prev_text + line
                 prev_text = ''  # Continuation is now used up
             m = RE_KEY_VALUE.match(line)
             if m:
                 value = m.group('value') or ''
                 key = m.group('key')

                 # Appending to a variable inserts a space beforehand
                 if m.group('plus') and key in makevars:
                     makevars[key] += ' ' + value
                 else:
                     makevars[key] = value
     return makevars

 def GetEnvFromMakefiles():
     """Scan the Makefiles to obtain the settings we need.

     This assumes that this script is being run from the top-level directory,
     not the pylibfdt directory.

     Returns:
         Tuple with:
             List of swig options
             Version string
             List of files to build
             List of extra C preprocessor flags needed
             Object directory to use (always '')
     """
     basedir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
     swig_opts = ['-I%s' % basedir]
     makevars = ParseMakefile(os.path.join(basedir, 'Makefile'))
     version = '%s.%s.%s' % (makevars['VERSION'], makevars['PATCHLEVEL'],
                             makevars['SUBLEVEL'])
     makevars = ParseMakefile(os.path.join(basedir, 'libfdt', 'Makefile.libfdt'))
     files = makevars['LIBFDT_SRCS'].split()
     files = [os.path.join(basedir, 'libfdt', fname) for fname in files]
     files.append('pylibfdt/libfdt.i')
     cflags = ['-I%s' % basedir, '-I%s/libfdt' % basedir]
     objdir = ''
     return swig_opts, version, files, cflags, objdir


 progname = sys.argv[0]
 files = os.environ.get('SOURCES', '').split()
 cflags = os.environ.get('CPPFLAGS', '').split()
 objdir = os.environ.get('OBJDIR')
 version = os.environ.get('VERSION')
 swig_opts = os.environ.get('SWIG_OPTS', '').split()

 # If we were called directly rather than through our Makefile (which is often
 # the case with Python module installation), read the settings from the
 # Makefile.
 if not all((swig_opts, version, files, cflags, objdir)):
     swig_opts, version, files, cflags, objdir = GetEnvFromMakefiles()

 libfdt_module = Extension(
     '_libfdt',
     sources = files,
     extra_compile_args = cflags,
     swig_opts = swig_opts,
 )

 setup(
     name='libfdt',
     version=version,
     author='Simon Glass <sjg@chromium.org>',
     description='Python binding for libfdt',
     ext_modules=[libfdt_module],
     package_dir={'': objdir},
     py_modules=['pylibfdt/libfdt'],
 )
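
ParseMakefile()'s handling of line continuation and '+=' can be checked
stand-alone; a small sketch, assuming the function above is in scope (the
Makefile fragment is hypothetical):

    import tempfile

    fragment = 'VERSION = 2017\nEXTRA = one \\\n two\nEXTRA += three\n'
    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        tmp.write(fragment)

    makevars = ParseMakefile(tmp.name)
    print makevars['VERSION']  # -> '2017'
    print makevars['EXTRA']    # continuation joined, then '+=' appended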
scripts/mailmapper
-#!/usr/bin/env python
+#!/usr/bin/env python2
 #
 # Copyright (C) 2014, Masahiro Yamada <yamada.m@jp.panasonic.com>
 #
 # SPDX-License-Identifier: GPL-2.0+
 #

 '''
 A tool to create/update the mailmap file

 The command 'git shortlog' summarizes git log output in a format suitable
 for inclusion in release announcements. Each commit will be grouped by
 author and title.

 One problem is that the authors' name and/or email address is sometimes
 spelled differently. The .mailmap feature can be used to coalesce together
 commits by the same person.
 (See 'man git-shortlog' for further information on this feature.)

 This tool helps to create/update the mailmap file.

 It runs 'git shortlog' internally and searches differently spelled author
 names which share the same email address. The author name with the most
 commits is assumed to be the canonical real name. If the number of commits
 from the canonical name is equal to or greater than 'MIN_COMMITS',
 the entry for the canonical name will be output. ('MIN_COMMITS' is used
 here because we do not want to create a fat mailmap by adding every author
 with only a few commits.)

 If there exists a mailmap file specified by the mailmap.file configuration
 option or '.mailmap' at the toplevel of the repository, it is used as
 a base file. (The mailmap.file configuration takes precedence over the
 '.mailmap' file if both exist.)

 The base file and the newly added entries are merged together and sorted
 alphabetically (but the comment block is kept untouched), and then printed
 to standard output.

 Usage
 -----

     scripts/mailmapper

 prints the mailmapping to standard output.

     scripts/mailmapper > tmp; mv tmp .mailmap

 will be useful for updating the '.mailmap' file.
 '''

 import sys
 import os
 import subprocess

 # Output entries only for canonical names with MIN_COMMITS or more commits.
 # This limitation is used so as not to create a too big mailmap file.
 MIN_COMMITS = 50

 try:
     toplevel = subprocess.check_output(['git', 'rev-parse', '--show-toplevel'])
 except subprocess.CalledProcessError:
     sys.exit('Please run in a git repository.')

 # strip '\n'
 toplevel = toplevel.rstrip()

 # Change the current working directory to the toplevel of the repository
 # for our easier life.
 os.chdir(toplevel)

 # First, create an 'author name' vs 'number of commits' database.
 # We assume the name with the most commits is the canonical real name.
 shortlog = subprocess.check_output(['git', 'shortlog', '-s', '-n'])

 commits_per_name = {}

 for line in shortlog.splitlines():
     try:
         commits, name = line.split(None, 1)
     except ValueError:
         # ignore lines with an empty author name
         continue
     commits_per_name[name] = int(commits)

 # Next, coalesce the author names with the same email address
 shortlog = subprocess.check_output(['git', 'shortlog', '-s', '-n', '-e'])

 mail_vs_name = {}
 output = {}

 for line in shortlog.splitlines():
     # tmp, mail = line.rsplit(None, 1) is not safe
     # because weird email addresses might include whitespace
     tmp, mail = line.split('<')
     mail = '<' + mail.rstrip()
     try:
         _, name = tmp.rstrip().split(None, 1)
     except ValueError:
         # author name is empty
         name = ''
     if mail in mail_vs_name:
         # another name for the same email address
         prev_name = mail_vs_name[mail]
         # Take the name with more commits
         major_name = sorted([prev_name, name],
                             key=lambda x: commits_per_name[x] if x else 0)[1]
         mail_vs_name[mail] = major_name
         if commits_per_name[major_name] >= MIN_COMMITS:
             output[mail] = major_name
     else:
         mail_vs_name[mail] = name

 # [1] If there exists a mailmap file at the location pointed to
 #     by the mailmap.file configuration option, update it.
 # [2] If the file .mailmap exists at the toplevel of the repository, update it.
 # [3] Otherwise, create a new mailmap file.
 mailmap_files = []

 try:
     config_mailmap = subprocess.check_output(['git', 'config', 'mailmap.file'])
 except subprocess.CalledProcessError:
     config_mailmap = ''

 config_mailmap = config_mailmap.rstrip()
 if config_mailmap:
     mailmap_files.append(config_mailmap)

 mailmap_files.append('.mailmap')

 infile = None

 for map_file in mailmap_files:
     try:
         infile = open(map_file)
     except:
         # Failed to open. Try next.
         continue
     break

 comment_block = []
 output_lines = []

 if infile:
     for line in infile:
         if line[0] == '#' or line[0] == '\n':
             comment_block.append(line)
         else:
             output_lines.append(line)
             break
     for line in infile:
         output_lines.append(line)
     infile.close()

 for mail, name in output.items():
     output_lines.append(name + ' ' + mail + '\n')

 output_lines.sort()

 sys.stdout.write(''.join(comment_block + output_lines))
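
The parsing in the second shortlog loop is easiest to see with a single
hypothetical input line; a quick sketch of what it extracts:

    line = '   100\tJane Doe <jane@example.com>'
    tmp, mail = line.split('<')
    mail = '<' + mail.rstrip()
    _, name = tmp.rstrip().split(None, 1)
    print name, mail  # -> Jane Doe <jane@example.com>

An entry is then emitted as name + ' ' + mail, which is the
"Proper Name <commit@email.xx>" form that git's mailmap machinery expects.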
test/py/test.py
-#!/usr/bin/env python
+#!/usr/bin/env python2

 # Copyright (c) 2015 Stephen Warren
 # Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
 #
 # SPDX-License-Identifier: GPL-2.0

 # Wrapper script to invoke pytest with the directory name that contains the
 # U-Boot tests.

 import os
 import os.path
 import sys

 # Get rid of argv[0]
 sys.argv.pop(0)

 # argv: py.test test_directory_name user-supplied-arguments
 args = ['py.test', os.path.dirname(__file__) + '/tests']
 args.extend(sys.argv)

 try:
     os.execvp('py.test', args)
 except:
     # Log full details of any exception for detailed analysis
     import traceback
     traceback.print_exc()
     # Hint to the user that they likely simply haven't installed the required
     # dependencies.
     print >>sys.stderr, '''
 exec(py.test) failed; perhaps you are missing some dependencies?
 See test/py/README.md for the list.'''
     sys.exit(1)
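
The wrapper only rewrites argv before exec'ing py.test; the effect can be
simulated for a hypothetical command line (the flags are illustrative):

    import os.path
    import sys

    # As if invoked as: ./test/py/test.py --bd sandbox --build
    sys.argv = ['./test/py/test.py', '--bd', 'sandbox', '--build']
    sys.argv.pop(0)
    args = ['py.test', os.path.dirname('./test/py/test.py') + '/tests']
    args.extend(sys.argv)
    print args  # -> ['py.test', './test/py/tests', '--bd', 'sandbox', '--build']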
tools/buildman/buildman.py
-#!/usr/bin/env python
+#!/usr/bin/env python2
 #
 # Copyright (c) 2012 The Chromium OS Authors.
 #
 # SPDX-License-Identifier: GPL-2.0+
 #

 """See README for more information"""

 import multiprocessing
 import os
 import re
 import sys
 import unittest

 # Bring in the patman libraries
 our_path = os.path.dirname(os.path.realpath(__file__))
 sys.path.insert(1, os.path.join(our_path, '../patman'))

 # Our modules
 import board
 import bsettings
 import builder
 import checkpatch
 import cmdline
 import control
 import doctest
 import gitutil
 import patchstream
 import terminal
 import toolchain

 def RunTests(skip_net_tests):
     import func_test
     import test
     import doctest

     result = unittest.TestResult()
     for module in ['toolchain', 'gitutil']:
         suite = doctest.DocTestSuite(module)
         suite.run(result)

     sys.argv = [sys.argv[0]]
     if skip_net_tests:
         test.use_network = False
     for module in (test.TestBuild, func_test.TestFunctional):
         suite = unittest.TestLoader().loadTestsFromTestCase(module)
         suite.run(result)

     print result
     for test, err in result.errors:
         print err
     for test, err in result.failures:
         print err


 options, args = cmdline.ParseArgs()

 # Run our meagre tests
 if options.test:
     RunTests(options.skip_net_tests)

 # Build selected commits for selected boards
 else:
     bsettings.Setup(options.config_file)
     ret_code = control.DoBuildman(options, args)
     sys.exit(ret_code)
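
RunTests() above funnels both doctest and unittest suites into one shared
TestResult. The same pattern in isolation, with a throwaway doctest standing
in for the toolchain/gitutil modules:

    import doctest
    import unittest

    def double(x):
        """Double a number.

        >>> double(2)
        4
        """
        return x * 2

    # Gather doctests into a suite and run them into a shared TestResult,
    # then inspect .errors/.failures afterwards, as RunTests() does.
    result = unittest.TestResult()
    suite = doctest.DocTestSuite()  # collects doctests from this module
    suite.run(result)
    print result
    for _, err in result.failures:
        print err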
tools/dtoc/dtoc.py
-#!/usr/bin/python
+#!/usr/bin/env python2
 #
 # Copyright (C) 2016 Google, Inc
 # Written by Simon Glass <sjg@chromium.org>
 #
 # SPDX-License-Identifier: GPL-2.0+
 #

 """Device tree to C tool

 This tool converts a device tree binary file (.dtb) into two C files. The
 intent is to allow a C program to access data from the device tree without
 having to link against libfdt. By putting the data from the device tree into
 C structures, normal C code can be used. This helps to reduce the size of the
 compiled program.

 Dtoc produces two output files:

     dt-structs.h  - contains struct definitions
     dt-platdata.c - contains data from the device tree using the struct
                     definitions, as well as U-Boot driver definitions.

 This tool is used in U-Boot to provide device tree data to SPL without
 increasing the code size of SPL. This supports the CONFIG_SPL_OF_PLATDATA
 option. For more information about the use of this option and tool please
 see doc/driver-model/of-plat.txt
 """

 from optparse import OptionParser
 import os
 import sys
 import unittest

 # Bring in the patman libraries
 our_path = os.path.dirname(os.path.realpath(__file__))
 sys.path.append(os.path.join(our_path, '../patman'))

 import dtb_platdata

 def run_tests():
     """Run all the tests we have for dtoc"""
     import test_dtoc

     result = unittest.TestResult()
     sys.argv = [sys.argv[0]]
     for module in (test_dtoc.TestDtoc,):
         suite = unittest.TestLoader().loadTestsFromTestCase(module)
         suite.run(result)

     print result
     for _, err in result.errors:
         print err
     for _, err in result.failures:
         print err

 if __name__ != '__main__':
     sys.exit(1)

 parser = OptionParser()
 parser.add_option('-d', '--dtb-file', action='store',
                   help='Specify the .dtb input file')
 parser.add_option('--include-disabled', action='store_true',
                   help='Include disabled nodes')
 parser.add_option('-o', '--output', action='store', default='-',
                   help='Select output filename')
 parser.add_option('-t', '--test', action='store_true', dest='test',
                   default=False, help='run tests')
 (options, args) = parser.parse_args()

 # Run our meagre tests
 if options.test:
     run_tests()

 else:
     dtb_platdata.run_steps(args, options.dtb_file, options.include_disabled,
                            options.output)
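
Given the options defined above, a stand-alone invocation would look
something like the following (the .dtb path is illustrative; any remaining
positional arguments are passed straight through to dtb_platdata.run_steps()):

    ./tools/dtoc/dtoc -d spl/u-boot-spl.dtb -o dt-platdata.c [args...]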
tools/microcode-tool.py
-#!/usr/bin/env python
+#!/usr/bin/env python2
 #
 # Copyright (c) 2014 Google, Inc
 #
 # SPDX-License-Identifier: GPL-2.0+
 #
 # Intel microcode update tool

 from optparse import OptionParser
 import os
 import re
 import struct
 import sys

 MICROCODE_DIR = 'arch/x86/dts/microcode'

 class Microcode:
     """Holds information about the microcode for a particular model of CPU.

     Attributes:
         name: Name of the CPU this microcode is for, including any version
             information (e.g. 'm12206a7_00000029')
         model: Model code string (this is cpuid(1).eax, e.g. '206a7')
         words: List of hex words containing the microcode. The first 16 words
             are the public header.
     """
     def __init__(self, name, data):
         self.name = name
         # Convert data into a list of hex words
         self.words = []
         for value in ''.join(data).split(','):
             hexval = value.strip()
             if hexval:
                 self.words.append(int(hexval, 0))

         # The model is in the 4th hex word
         self.model = '%x' % self.words[3]

 def ParseFile(fname):
     """Parse a microcode.dat file and return the component parts

     Args:
         fname: Filename to parse
     Returns:
         3-Tuple:
             date: String containing date from the file's header
             license_text: List of text lines for the license file
             microcodes: List of Microcode objects from the file
     """
     re_date = re.compile('/\* *(.* [0-9]{4}) *\*/$')
     re_license = re.compile('/[^-*+] *(.*)$')
     re_name = re.compile('/\* *(.*)\.inc *\*/', re.IGNORECASE)
     microcodes = {}
     license_text = []
     date = ''
     data = []
     name = None
     with open(fname) as fd:
         for line in fd:
             line = line.rstrip()
             m_date = re_date.match(line)
             m_license = re_license.match(line)
             m_name = re_name.match(line)
             if m_name:
                 if name:
                     microcodes[name] = Microcode(name, data)
                 name = m_name.group(1).lower()
                 data = []
             elif m_license:
                 license_text.append(m_license.group(1))
             elif m_date:
                 date = m_date.group(1)
             else:
                 data.append(line)
     if name:
         microcodes[name] = Microcode(name, data)
     return date, license_text, microcodes

 def ParseHeaderFiles(fname_list):
     """Parse a list of header files and return the component parts

     Args:
         fname_list: List of files to parse
     Returns:
         date: String containing date from the file's header
         license_text: List of text lines for the license file
         microcodes: List of Microcode objects from the file
     """
     microcodes = {}
     license_text = []
     date = ''
     name = None
     for fname in fname_list:
         name = os.path.basename(fname).lower()
         name = os.path.splitext(name)[0]
         data = []
         with open(fname) as fd:
             license_start = False
             license_end = False
             for line in fd:
                 line = line.rstrip()

                 if len(line) >= 2:
                     if line[0] == '/' and line[1] == '*':
                         license_start = True
                         continue
                     if line[0] == '*' and line[1] == '/':
                         license_end = True
                         continue
                 if license_start and not license_end:
                     # Ignore blank line
                     if len(line) > 0:
                         license_text.append(line)
                     continue
                 # Omit anything after the last comma
                 words = line.split(',')[:-1]
                 data += [word + ',' for word in words]
         microcodes[name] = Microcode(name, data)
     return date, license_text, microcodes


 def List(date, microcodes, model):
     """List the available microcode chunks

     Args:
         date: Date of the microcode file
         microcodes: Dict of Microcode objects indexed by name
         model: Model string to search for, or None
     """
     print 'Date: %s' % date
     if model:
         mcode_list, tried = FindMicrocode(microcodes, model.lower())
         print 'Matching models %s:' % (', '.join(tried))
     else:
         print 'All models:'
         mcode_list = [microcodes[m] for m in microcodes.keys()]
     for mcode in mcode_list:
         print '%-20s: model %s' % (mcode.name, mcode.model)

 def FindMicrocode(microcodes, model):
     """Find all the microcode chunks which match the given model.

     This model is something like 306a9 (the value returned in eax from
     cpuid(1) when running on Intel CPUs). But we allow a partial match,
     omitting the last one or two characters to allow many families to have
     the same microcode.

     If the model name is ambiguous we return a list of matches.

     Args:
         microcodes: Dict of Microcode objects indexed by name
         model: String containing model name to find
     Returns:
         Tuple:
             List of matching Microcode objects
             List of abbreviations we tried
     """
     # Allow a full name to be used
     mcode = microcodes.get(model)
     if mcode:
         return [mcode], []

     tried = []
     found = []
     for i in range(3):
         abbrev = model[:-i] if i else model
         tried.append(abbrev)
         for mcode in microcodes.values():
             if mcode.model.startswith(abbrev):
                 found.append(mcode)
         if found:
             break
     return found, tried

 def CreateFile(date, license_text, mcodes, outfile):
     """Create a microcode file in U-Boot's .dtsi format

     Args:
         date: String containing date of original microcode file
         license_text: List of text lines for the license file
         mcodes: Microcode objects to write (normally only 1)
         outfile: Filename to write to ('-' for stdout)
     """
     out = '''/*%s
  * ---
  * This is a device tree fragment. Use #include to add these properties to a
  * node.
  *
  * Date: %s
  */

 compatible = "intel,microcode";
 intel,header-version = <%d>;
 intel,update-revision = <%#x>;
 intel,date-code = <%#x>;
 intel,processor-signature = <%#x>;
 intel,checksum = <%#x>;
 intel,loader-revision = <%d>;
 intel,processor-flags = <%#x>;

 /* The first 48-bytes are the public header which repeats the above data */
 data = <%s
 \t>;'''
     words = ''
     add_comments = len(mcodes) > 1
     for mcode in mcodes:
         if add_comments:
             words += '\n/* %s */' % mcode.name
         for i in range(len(mcode.words)):
             if not (i & 3):
                 words += '\n'
             val = mcode.words[i]
             # Change each word so it will be little-endian in the FDT
             # This data is needed before RAM is available on some platforms so
             # we cannot do an endianness swap on boot.
             val = struct.unpack("<I", struct.pack(">I", val))[0]
             words += '\t%#010x' % val

     # Use the first microcode for the headers
     mcode = mcodes[0]

     # Take care to avoid adding a space before a tab
     text = ''
     for line in license_text:
         if line[0] == '\t':
             text += '\n *' + line
         else:
             text += '\n * ' + line
     args = [text, date]
     args += [mcode.words[i] for i in range(7)]
     args.append(words)
     if outfile == '-':
         print out % tuple(args)
     else:
         if not outfile:
             if not os.path.exists(MICROCODE_DIR):
                 print >> sys.stderr, "Creating directory '%s'" % MICROCODE_DIR
                 os.makedirs(MICROCODE_DIR)
             outfile = os.path.join(MICROCODE_DIR, mcode.name + '.dtsi')
         print >> sys.stderr, "Writing microcode for '%s' to '%s'" % (
             ', '.join([mcode.name for mcode in mcodes]), outfile)
         with open(outfile, 'w') as fd:
             print >> fd, out % tuple(args)

 def MicrocodeTool():
     """Run the microcode tool"""
     commands = 'create,license,list'.split(',')
     parser = OptionParser()
     parser.add_option('-d', '--mcfile', type='string', action='store',
                       help='Name of microcode.dat file')
     parser.add_option('-H', '--headerfile', type='string', action='append',
                       help='Name of .h file containing microcode')
     parser.add_option('-m', '--model', type='string', action='store',
                       help="Model name to extract ('all' for all)")
     parser.add_option('-M', '--multiple', type='string', action='store',
                       help="Allow output of multiple models")
     parser.add_option('-o', '--outfile', type='string', action='store',
                       help='Filename to use for output (- for stdout), default is'
                            ' %s/<name>.dtsi' % MICROCODE_DIR)
     parser.usage += """ command

 Process an Intel microcode file (use -h for help). Commands:

    create     Create microcode .dtsi file for a model
    list       List available models in microcode file
    license    Print the license

 Typical usage:

    ./tools/microcode-tool -d microcode.dat -m 306a create

 This will find the appropriate file and write it to %s.""" % MICROCODE_DIR

     (options, args) = parser.parse_args()
     if not args:
         parser.error('Please specify a command')
     cmd = args[0]
     if cmd not in commands:
         parser.error("Unknown command '%s'" % cmd)

     if options.headerfile and options.mcfile:
         parser.error("You must specify either header files or a microcode "
                      "file, not both")
     if options.headerfile:
         date, license_text, microcodes = ParseHeaderFiles(options.headerfile)
     elif options.mcfile:
         date, license_text, microcodes = ParseFile(options.mcfile)
     else:
         parser.error('You must specify a microcode file (or header files)')

     if cmd == 'list':
         List(date, microcodes, options.model)
     elif cmd == 'license':
         print '\n'.join(license_text)
     elif cmd == 'create':
         if not options.model:
             parser.error('You must specify a model to create')
         model = options.model.lower()
         if options.model == 'all':
             options.multiple = True
             mcode_list = microcodes.values()
             tried = []
         else:
             mcode_list, tried = FindMicrocode(microcodes, model)
         if not mcode_list:
             parser.error("Unknown model '%s' (%s) - try 'list' to list" %
                          (model, ', '.join(tried)))
         if not options.multiple and len(mcode_list) > 1:
             parser.error("Ambiguous model '%s' (%s) matched %s - try 'list' "
                          "to list or specify a particular file" %
                          (model, ', '.join(tried),
                           ', '.join([m.name for m in mcode_list])))
         CreateFile(date, license_text, mcode_list, options.outfile)
     else:
         parser.error("Unknown command '%s'" % cmd)

 if __name__ == "__main__":
     MicrocodeTool()
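
FindMicrocode()'s progressive abbreviation is easy to trace by hand; for a
hypothetical model string the loop tries ever-shorter prefixes:

    model = '306a9'
    tried = [model[:-i] if i else model for i in range(3)]
    print tried  # -> ['306a9', '306a', '306']

The search stops at the first abbreviation that prefix-matches any
Microcode.model, so '306a' here would match a '306a9' microcode entry.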
tools/patman/patman.py
1 #!/usr/bin/env python 1 #!/usr/bin/env python2
2 # 2 #
3 # Copyright (c) 2011 The Chromium OS Authors. 3 # Copyright (c) 2011 The Chromium OS Authors.
4 # 4 #
5 # SPDX-License-Identifier: GPL-2.0+ 5 # SPDX-License-Identifier: GPL-2.0+
6 # 6 #
7 7
8 """See README for more information""" 8 """See README for more information"""
9 9
10 from optparse import OptionParser 10 from optparse import OptionParser
11 import os 11 import os
12 import re 12 import re
13 import sys 13 import sys
14 import unittest 14 import unittest
15 15
16 # Our modules 16 # Our modules
17 try: 17 try:
18 from patman import checkpatch, command, gitutil, patchstream, \ 18 from patman import checkpatch, command, gitutil, patchstream, \
19 project, settings, terminal, test 19 project, settings, terminal, test
20 except ImportError: 20 except ImportError:
21 import checkpatch 21 import checkpatch
22 import command 22 import command
23 import gitutil 23 import gitutil
24 import patchstream 24 import patchstream
25 import project 25 import project
26 import settings 26 import settings
27 import terminal 27 import terminal
28 import test 28 import test
29 29
30 30
31 parser = OptionParser() 31 parser = OptionParser()
32 parser.add_option('-H', '--full-help', action='store_true', dest='full_help', 32 parser.add_option('-H', '--full-help', action='store_true', dest='full_help',
33 default=False, help='Display the README file') 33 default=False, help='Display the README file')
34 parser.add_option('-c', '--count', dest='count', type='int', 34 parser.add_option('-c', '--count', dest='count', type='int',
35 default=-1, help='Automatically create patches from top n commits') 35 default=-1, help='Automatically create patches from top n commits')
36 parser.add_option('-i', '--ignore-errors', action='store_true', 36 parser.add_option('-i', '--ignore-errors', action='store_true',
37 dest='ignore_errors', default=False, 37 dest='ignore_errors', default=False,
38 help='Send patches email even if patch errors are found') 38 help='Send patches email even if patch errors are found')
39 parser.add_option('-m', '--no-maintainers', action='store_false', 39 parser.add_option('-m', '--no-maintainers', action='store_false',
40 dest='add_maintainers', default=True, 40 dest='add_maintainers', default=True,
41 help="Don't cc the file maintainers automatically") 41 help="Don't cc the file maintainers automatically")
42 parser.add_option('-n', '--dry-run', action='store_true', dest='dry_run', 42 parser.add_option('-n', '--dry-run', action='store_true', dest='dry_run',
43 default=False, help="Do a dry run (create but don't email patches)") 43 default=False, help="Do a dry run (create but don't email patches)")
44 parser.add_option('-p', '--project', default=project.DetectProject(), 44 parser.add_option('-p', '--project', default=project.DetectProject(),
45 help="Project name; affects default option values and " 45 help="Project name; affects default option values and "
46 "aliases [default: %default]") 46 "aliases [default: %default]")
47 parser.add_option('-r', '--in-reply-to', type='string', action='store', 47 parser.add_option('-r', '--in-reply-to', type='string', action='store',
48 help="Message ID that this series is in reply to") 48 help="Message ID that this series is in reply to")
49 parser.add_option('-s', '--start', dest='start', type='int', 49 parser.add_option('-s', '--start', dest='start', type='int',
50 default=0, help='Commit to start creating patches from (0 = HEAD)') 50 default=0, help='Commit to start creating patches from (0 = HEAD)')
51 parser.add_option('-t', '--ignore-bad-tags', action='store_true', 51 parser.add_option('-t', '--ignore-bad-tags', action='store_true',
52 default=False, help='Ignore bad tags / aliases') 52 default=False, help='Ignore bad tags / aliases')
53 parser.add_option('--test', action='store_true', dest='test', 53 parser.add_option('--test', action='store_true', dest='test',
54 default=False, help='run tests') 54 default=False, help='run tests')
55 parser.add_option('-v', '--verbose', action='store_true', dest='verbose', 55 parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
56 default=False, help='Verbose output of errors and warnings') 56 default=False, help='Verbose output of errors and warnings')
57 parser.add_option('--cc-cmd', dest='cc_cmd', type='string', action='store', 57 parser.add_option('--cc-cmd', dest='cc_cmd', type='string', action='store',
58 default=None, help='Output cc list for patch file (used by git)') 58 default=None, help='Output cc list for patch file (used by git)')
59 parser.add_option('--no-check', action='store_false', dest='check_patch', 59 parser.add_option('--no-check', action='store_false', dest='check_patch',
60 default=True, 60 default=True,
61 help="Don't check for patch compliance") 61 help="Don't check for patch compliance")
62 parser.add_option('--no-tags', action='store_false', dest='process_tags', 62 parser.add_option('--no-tags', action='store_false', dest='process_tags',
63 default=True, help="Don't process subject tags as aliaes") 63 default=True, help="Don't process subject tags as aliaes")
64 parser.add_option('-T', '--thread', action='store_true', dest='thread', 64 parser.add_option('-T', '--thread', action='store_true', dest='thread',
65 default=False, help='Create patches as a single thread') 65 default=False, help='Create patches as a single thread')
66 66
67 parser.usage += """ 67 parser.usage += """
68 68
69 Create patches from commits in a branch, check them and email them as 69 Create patches from commits in a branch, check them and email them as
70 specified by tags you place in the commits. Use -n to do a dry run first.""" 70 specified by tags you place in the commits. Use -n to do a dry run first."""
71 71
72 72
73 # Parse options twice: first to get the project and second to handle 73 # Parse options twice: first to get the project and second to handle
74 # defaults properly (which depends on project). 74 # defaults properly (which depends on project).
75 (options, args) = parser.parse_args() 75 (options, args) = parser.parse_args()
76 settings.Setup(parser, options.project, '') 76 settings.Setup(parser, options.project, '')
77 (options, args) = parser.parse_args() 77 (options, args) = parser.parse_args()
78 78
79 if __name__ != "__main__": 79 if __name__ != "__main__":
80 pass 80 pass
81 81
82 # Run our meagre tests 82 # Run our meagre tests
83 elif options.test: 83 elif options.test:
84 import doctest 84 import doctest
85 import func_test 85 import func_test
86 86
87 sys.argv = [sys.argv[0]] 87 sys.argv = [sys.argv[0]]
88 result = unittest.TestResult() 88 result = unittest.TestResult()
89 for module in (test.TestPatch, func_test.TestFunctional): 89 for module in (test.TestPatch, func_test.TestFunctional):
90 suite = unittest.TestLoader().loadTestsFromTestCase(module) 90 suite = unittest.TestLoader().loadTestsFromTestCase(module)
91 suite.run(result) 91 suite.run(result)
92 92
93 for module in ['gitutil', 'settings']: 93 for module in ['gitutil', 'settings']:
94 suite = doctest.DocTestSuite(module) 94 suite = doctest.DocTestSuite(module)
95 suite.run(result) 95 suite.run(result)
96 96
97 # TODO: Surely we can just 'print' result? 97 # TODO: Surely we can just 'print' result?
98 print(result) 98 print(result)
99 for test, err in result.errors: 99 for test, err in result.errors:
100 print(err) 100 print(err)
101 for test, err in result.failures: 101 for test, err in result.failures:
102 print(err) 102 print(err)
103 103
104 # Called from git with a patch filename as argument 104 # Called from git with a patch filename as argument
105 # Printout a list of additional CC recipients for this patch 105 # Printout a list of additional CC recipients for this patch
106 elif options.cc_cmd: 106 elif options.cc_cmd:
107 fd = open(options.cc_cmd, 'r') 107 fd = open(options.cc_cmd, 'r')
108 re_line = re.compile('(\S*) (.*)') 108 re_line = re.compile('(\S*) (.*)')
109 for line in fd.readlines(): 109 for line in fd.readlines():
110 match = re_line.match(line) 110 match = re_line.match(line)
111 if match and match.group(1) == args[0]: 111 if match and match.group(1) == args[0]:
112 for cc in match.group(2).split(', '): 112 for cc in match.group(2).split(', '):
113 cc = cc.strip() 113 cc = cc.strip()
114 if cc: 114 if cc:
115 print(cc) 115 print(cc)
116 fd.close() 116 fd.close()
117 117
118 elif options.full_help: 118 elif options.full_help:
119 pager = os.getenv('PAGER') 119 pager = os.getenv('PAGER')
120 if not pager: 120 if not pager:
121 pager = 'more' 121 pager = 'more'
122 fname = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), 122 fname = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])),
123 'README') 123 'README')
124 command.Run(pager, fname) 124 command.Run(pager, fname)
125 125
126 # Process commits, produce patches files, check them, email them 126 # Process commits, produce patches files, check them, email them
127 else: 127 else:
128 gitutil.Setup() 128 gitutil.Setup()
129 129
130 if options.count == -1: 130 if options.count == -1:
131 # Work out how many patches to send if we can 131 # Work out how many patches to send if we can
132 options.count = gitutil.CountCommitsToBranch() - options.start 132 options.count = gitutil.CountCommitsToBranch() - options.start
133 133
134 col = terminal.Color() 134 col = terminal.Color()
135 if not options.count: 135 if not options.count:
136 str = 'No commits found to process - please use -c flag' 136 str = 'No commits found to process - please use -c flag'
137 sys.exit(col.Color(col.RED, str)) 137 sys.exit(col.Color(col.RED, str))
138 138
139 # Read the metadata from the commits 139 # Read the metadata from the commits
140 if options.count: 140 if options.count:
141 series = patchstream.GetMetaData(options.start, options.count) 141 series = patchstream.GetMetaData(options.start, options.count)
142 cover_fname, args = gitutil.CreatePatches(options.start, options.count, 142 cover_fname, args = gitutil.CreatePatches(options.start, options.count,
143 series) 143 series)
144 144
145 # Fix up the patch files to our liking, and insert the cover letter 145 # Fix up the patch files to our liking, and insert the cover letter
146 patchstream.FixPatches(series, args) 146 patchstream.FixPatches(series, args)
147 if cover_fname and series.get('cover'): 147 if cover_fname and series.get('cover'):
148 patchstream.InsertCoverLetter(cover_fname, series, options.count) 148 patchstream.InsertCoverLetter(cover_fname, series, options.count)
149 149
150 # Do a few checks on the series 150 # Do a few checks on the series
151 series.DoChecks() 151 series.DoChecks()
152 152
153 # Check the patches, and run them through 'git am' just to be sure 153 # Check the patches, and run them through 'git am' just to be sure
154 if options.check_patch: 154 if options.check_patch:
155 ok = checkpatch.CheckPatches(options.verbose, args) 155 ok = checkpatch.CheckPatches(options.verbose, args)
156 else: 156 else:
157 ok = True 157 ok = True
158 158
159 cc_file = series.MakeCcFile(options.process_tags, cover_fname, 159 cc_file = series.MakeCcFile(options.process_tags, cover_fname,
160 not options.ignore_bad_tags, 160 not options.ignore_bad_tags,
161 options.add_maintainers) 161 options.add_maintainers)
162 162
163 # Email the patches out (giving the user time to check / cancel) 163 # Email the patches out (giving the user time to check / cancel)
164 cmd = '' 164 cmd = ''
165 its_a_go = ok or options.ignore_errors 165 its_a_go = ok or options.ignore_errors
166 if its_a_go: 166 if its_a_go:
167 cmd = gitutil.EmailPatches(series, cover_fname, args, 167 cmd = gitutil.EmailPatches(series, cover_fname, args,
168 options.dry_run, not options.ignore_bad_tags, cc_file, 168 options.dry_run, not options.ignore_bad_tags, cc_file,
169 in_reply_to=options.in_reply_to, thread=options.thread) 169 in_reply_to=options.in_reply_to, thread=options.thread)
170 else: 170 else:
171 print(col.Color(col.RED, "Not sending emails due to errors/warnings")) 171 print(col.Color(col.RED, "Not sending emails due to errors/warnings"))
172 172
173 # For a dry run, just show our actions as a sanity check 173 # For a dry run, just show our actions as a sanity check
174 if options.dry_run: 174 if options.dry_run:
175 series.ShowActions(args, cmd, options.process_tags) 175 series.ShowActions(args, cmd, options.process_tags)
176 if not its_a_go: 176 if not its_a_go:
177 print(col.Color(col.RED, "Email would not be sent")) 177 print(col.Color(col.RED, "Email would not be sent"))
178 178
179 os.remove(cc_file) 179 os.remove(cc_file)
180 180
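The cc_cmd branch above is the reader side of patman's cc file: series.MakeCcFile() apparently writes one line per patch of the form '<patch-filename> <addr>, <addr>, ...', and when git send-email invokes the tool with a patch filename, the matching addresses are printed one per line (the format git's --cc-cmd hook expects). A minimal stand-alone sketch of that contract; the filename and addresses here are invented for illustration:

    import re

    # One cc-file line, in the format assumed above (illustrative only)
    cc_line = '0001-example.patch dev@example.com, alice@example.com'
    match = re.match(r'(\S*) (.*)', cc_line)
    if match and match.group(1) == '0001-example.patch':
        for cc in match.group(2).split(', '):
            cc = cc.strip()
            if cc:
                # git send-email's --cc-cmd reads one address per line
                print(cc)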
tools/rkmux.py
1 #!/usr/bin/python 1 #!/usr/bin/env python2
2 2
3 # Script to create enums from datasheet register tables 3 # Script to create enums from datasheet register tables
4 # 4 #
5 # Usage: 5 # Usage:
6 # 6 #
7 # First, create a text file from the datasheet: 7 # First, create a text file from the datasheet:
8 # pdftotext -layout /path/to/rockchip-3288-trm.pdf /tmp/asc 8 # pdftotext -layout /path/to/rockchip-3288-trm.pdf /tmp/asc
9 # 9 #
10 # Then use this script to output the #defines for a particular register: 10 # Then use this script to output the #defines for a particular register:
11 # ./tools/rkmux.py /tmp/asc GRF_GPIO4C_IOMUX 11 # ./tools/rkmux.py /tmp/asc GRF_GPIO4C_IOMUX
12 # 12 #
13 # It will create output suitable for putting in a header file, with SHIFT and 13 # It will create output suitable for putting in a header file, with SHIFT and
14 # MASK values for each bitfield in the register. 14 # MASK values for each bitfield in the register.
15 # 15 #
16 # Note: this tool is not perfect and you may need to edit the resulting code. 16 # Note: this tool is not perfect and you may need to edit the resulting code.
17 # But it should speed up the process. 17 # But it should speed up the process.
18 18
19 import csv 19 import csv
20 import re 20 import re
21 import sys 21 import sys
22 22
23 tab_to_col = 3 23 tab_to_col = 3
24 24
25 class RegField: 25 class RegField:
26 def __init__(self, cols=None): 26 def __init__(self, cols=None):
27 if cols: 27 if cols:
28 self.bits, self.attr, self.reset_val, self.desc = ( 28 self.bits, self.attr, self.reset_val, self.desc = (
29 [x.strip() for x in cols]) 29 [x.strip() for x in cols])
30 self.desc = [self.desc] 30 self.desc = [self.desc]
31 else: 31 else:
32 self.bits = '' 32 self.bits = ''
33 self.attr = '' 33 self.attr = ''
34 self.reset_val = '' 34 self.reset_val = ''
35 self.desc = [] 35 self.desc = []
36 36
37 def Setup(self, cols): 37 def Setup(self, cols):
38 self.bits, self.attr, self.reset_val = cols[0:3] 38 self.bits, self.attr, self.reset_val = cols[0:3]
39 if len(cols) > 3: 39 if len(cols) > 3:
40 self.desc.append(cols[3]) 40 self.desc.append(cols[3])
41 41
42 def AddDesc(self, desc): 42 def AddDesc(self, desc):
43 self.desc.append(desc) 43 self.desc.append(desc)
44 44
45 def Show(self): 45 def Show(self):
46 print self 46 print self
47 print 47 print
48 self.__init__() 48 self.__init__()
49 49
50 def __str__(self): 50 def __str__(self):
51 return '%s,%s,%s,%s' % (self.bits, self.attr, self.reset_val, 51 return '%s,%s,%s,%s' % (self.bits, self.attr, self.reset_val,
52 '\n'.join(self.desc)) 52 '\n'.join(self.desc))
53 53
54 class Printer: 54 class Printer:
55 def __init__(self, name): 55 def __init__(self, name):
56 self.first = True 56 self.first = True
57 self.name = name 57 self.name = name
58 self.re_sel = re.compile("[1-9]'b([01]+): (.*)") 58 self.re_sel = re.compile("[1-9]'b([01]+): (.*)")
59 59
60 def __enter__(self): 60 def __enter__(self):
61 return self 61 return self
62 62
63 def __exit__(self, type, value, traceback): 63 def __exit__(self, type, value, traceback):
64 if not self.first: 64 if not self.first:
65 self.output_footer() 65 self.output_footer()
66 66
67 def output_header(self): 67 def output_header(self):
68 print '/* %s */' % self.name 68 print '/* %s */' % self.name
69 print 'enum {' 69 print 'enum {'
70 70
71 def output_footer(self): 71 def output_footer(self):
72 print '};' 72 print '};'
73 73
74 def output_regfield(self, regfield): 74 def output_regfield(self, regfield):
75 lines = regfield.desc 75 lines = regfield.desc
76 field = lines[0] 76 field = lines[0]
77 #print 'field:', field 77 #print 'field:', field
78 if field in ['reserved', 'reserve', 'write_enable', 'write_mask']: 78 if field in ['reserved', 'reserve', 'write_enable', 'write_mask']:
79 return 79 return
80 if field.endswith('_sel') or field.endswith('_con'): 80 if field.endswith('_sel') or field.endswith('_con'):
81 field = field[:-4] 81 field = field[:-4]
82 elif field.endswith(' iomux'): 82 elif field.endswith(' iomux'):
83 field = field[:-6] 83 field = field[:-6]
84 elif field.endswith('_mode') or field.endswith('_mask'): 84 elif field.endswith('_mode') or field.endswith('_mask'):
85 field = field[:-5] 85 field = field[:-5]
86 #else: 86 #else:
87 #print 'bad field %s' % field 87 #print 'bad field %s' % field
88 #return 88 #return
89 field = field.upper() 89 field = field.upper()
90 if ':' in regfield.bits: 90 if ':' in regfield.bits:
91 bit_high, bit_low = [int(x) for x in regfield.bits.split(':')] 91 bit_high, bit_low = [int(x) for x in regfield.bits.split(':')]
92 else: 92 else:
93 bit_high = bit_low = int(regfield.bits) 93 bit_high = bit_low = int(regfield.bits)
94 bit_width = bit_high - bit_low + 1 94 bit_width = bit_high - bit_low + 1
95 mask = (1 << bit_width) - 1 95 mask = (1 << bit_width) - 1
96 if self.first: 96 if self.first:
97 self.first = False 97 self.first = False
98 self.output_header() 98 self.output_header()
99 else: 99 else:
100 print 100 print
101 out_enum(field, 'shift', bit_low) 101 out_enum(field, 'shift', bit_low)
102 out_enum(field, 'mask', mask) 102 out_enum(field, 'mask', mask)
103 next_val = -1 103 next_val = -1
104 #print 'lines: %s', lines 104 #print 'lines: %s', lines
105 for line in lines: 105 for line in lines:
106 m = self.re_sel.match(line) 106 m = self.re_sel.match(line)
107 if m: 107 if m:
108 val, enum = int(m.group(1), 2), m.group(2) 108 val, enum = int(m.group(1), 2), m.group(2)
109 if enum not in ['reserved', 'reserve']: 109 if enum not in ['reserved', 'reserve']:
110 out_enum(field, enum, val, val == next_val) 110 out_enum(field, enum, val, val == next_val)
111 next_val = val + 1 111 next_val = val + 1
112 112
113 113
114 def process_file(name, fd): 114 def process_file(name, fd):
115 field = RegField() 115 field = RegField()
116 reg = '' 116 reg = ''
117 117
118 fields = [] 118 fields = []
119 119
120 def add_it(field): 120 def add_it(field):
121 if field.bits: 121 if field.bits:
122 if reg == name: 122 if reg == name:
123 fields.append(field) 123 fields.append(field)
124 field = RegField() 124 field = RegField()
125 return field 125 return field
126 126
127 def is_field_start(line): 127 def is_field_start(line):
128 if '=' in line or '+' in line: 128 if '=' in line or '+' in line:
129 return False 129 return False
130 if (line.startswith('gpio') or line.startswith('peri_') or 130 if (line.startswith('gpio') or line.startswith('peri_') or
131 line.endswith('_sel') or line.endswith('_con')): 131 line.endswith('_sel') or line.endswith('_con')):
132 return True 132 return True
133 if not ' ' in line: # and '_' in line: 133 if not ' ' in line: # and '_' in line:
134 return True 134 return True
135 return False 135 return False
136 136
137 for line in fd: 137 for line in fd:
138 line = line.rstrip() 138 line = line.rstrip()
139 if line[:4] in ['GRF_', 'PMU_', 'CRU_']: 139 if line[:4] in ['GRF_', 'PMU_', 'CRU_']:
140 field = add_it(field) 140 field = add_it(field)
141 reg = line 141 reg = line
142 do_this = name == reg 142 do_this = name == reg
143 elif not line or not line.startswith(' '): 143 elif not line or not line.startswith(' '):
144 continue 144 continue
145 line = line.replace('\xe2\x80\x99', "'") 145 line = line.replace('\xe2\x80\x99', "'")
146 leading = len(line) - len(line.lstrip()) 146 leading = len(line) - len(line.lstrip())
147 line = line.lstrip() 147 line = line.lstrip()
148 cols = re.split(' *', line, 3) 148 cols = re.split(' *', line, 3)
149 if leading > 15 or (len(cols) > 3 and is_field_start(cols[3])): 149 if leading > 15 or (len(cols) > 3 and is_field_start(cols[3])):
150 if is_field_start(line): 150 if is_field_start(line):
151 field = add_it(field) 151 field = add_it(field)
152 field.AddDesc(line) 152 field.AddDesc(line)
153 else: 153 else:
154 if cols[0] == 'Bit' or len(cols) < 3: 154 if cols[0] == 'Bit' or len(cols) < 3:
155 continue 155 continue
156 #print 156 #print
157 #print field 157 #print field
158 field = add_it(field) 158 field = add_it(field)
159 field.Setup(cols) 159 field.Setup(cols)
160 field = add_it(field) 160 field = add_it(field)
161 161
162 with Printer(name) as printer: 162 with Printer(name) as printer:
163 for field in fields: 163 for field in fields:
164 #print field 164 #print field
165 printer.output_regfield(field) 165 printer.output_regfield(field)
166 #print 166 #print
167 167
168 def out_enum(field, suffix, value, skip_val=False): 168 def out_enum(field, suffix, value, skip_val=False):
169 str = '%s_%s' % (field.upper(), suffix.upper()) 169 str = '%s_%s' % (field.upper(), suffix.upper())
170 if not skip_val: 170 if not skip_val:
171 tabs = tab_to_col - len(str) / 8 171 tabs = tab_to_col - len(str) / 8
172 if value > 9: 172 if value > 9:
173 val_str = '%#x' % value 173 val_str = '%#x' % value
174 else: 174 else:
175 val_str = '%d' % value 175 val_str = '%d' % value
176 176
177 str += '%s= %s' % ('\t' * tabs, val_str) 177 str += '%s= %s' % ('\t' * tabs, val_str)
178 print '\t%s,' % str 178 print '\t%s,' % str
179 179
180 # Process a CSV file, e.g. from tabula 180 # Process a CSV file, e.g. from tabula
181 def process_csv(name, fd): 181 def process_csv(name, fd):
182 reader = csv.reader(fd) 182 reader = csv.reader(fd)
183 183
184 rows = [] 184 rows = []
185 185
186 field = RegField() 186 field = RegField()
187 for row in reader: 187 for row in reader:
188 #print field.desc 188 #print field.desc
189 if not row[0]: 189 if not row[0]:
190 field.desc.append(row[3]) 190 field.desc.append(row[3])
191 continue 191 continue
192 if field.bits: 192 if field.bits:
193 if field.bits != 'Bit': 193 if field.bits != 'Bit':
194 rows.append(field) 194 rows.append(field)
195 #print row 195 #print row
196 field = RegField(row) 196 field = RegField(row)
197 197
198 with Printer(name) as printer: 198 with Printer(name) as printer:
199 for row in rows: 199 for row in rows:
200 #print field 200 #print field
201 printer.output_regfield(row) 201 printer.output_regfield(row)
202 #print 202 #print
203 203
204 fname = sys.argv[1] 204 fname = sys.argv[1]
205 name = sys.argv[2] 205 name = sys.argv[2]
206 206
207 # Read output from pdftotext -layout 207 # Read output from pdftotext -layout
208 if 1: 208 if 1:
209 with open(fname, 'r') as fd: 209 with open(fname, 'r') as fd:
210 process_file(name, fd) 210 process_file(name, fd)
211 211
212 # Use tabula 212 # Use tabula
213 # It seems better at keeping the text of an entire table cell together. 213 # It seems better at keeping the text of an entire table cell together.
214 # But it does not always work. E.g. GRF_GPIO7CH_IOMUX. 214 # But it does not always work. E.g. GRF_GPIO7CH_IOMUX.
215 # So there is no point in using it. 215 # So there is no point in using it.
216 if 0: 216 if 0:
217 with open(fname, 'r') as fd: 217 with open(fname, 'r') as fd:
218 process_csv(name, fd) 218 process_csv(name, fd)
219 219
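For a sense of what the tool emits, here is a hypothetical run against a pdftotext dump; the field and selector names are invented for illustration, and as the header comment warns, real output typically needs hand editing:

    ./tools/rkmux.py /tmp/asc GRF_GPIO4C_IOMUX

    /* GRF_GPIO4C_IOMUX */
    enum {
    	GPIO4C0_SHIFT		= 0,
    	GPIO4C0_MASK		= 3,
    	GPIO4C0_GPIO		= 0,
    	GPIO4C0_SDMMC0DAT1,
    };

Each field gets a SHIFT/MASK pair derived from its bit range (bits 1:0 give shift 0 and mask 3), and each decoded selector value becomes an enum entry, with the value omitted when it simply continues the previous one.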