Python cleanups, round 1: whitespace
- Use 4 spaces instead of 2 for indentation. This is the Python standard and is
  also what Google's style guide for Python prescribes:
  https://google.github.io/styleguide/pyguide.html#Indentation
- Use 2 newlines between functions/classes.

This does introduce a few line-too-long errors, which will be fixed in the
follow-up commit, but the intent was to keep this change as easy to review as
possible (git diff -w should be minimal).

Change-Id: I463f18d11e72745107350ac0ae5588d1fb626ed6
parent c1399cf4c7 · commit dd22470db8
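
For illustration, the convention this commit applies looks like the sketch
below (a made-up example, not code from this diff): 4 spaces per indentation
level, and two blank lines between top-level definitions.

def first_function(x):
    # 4-space indentation, per PEP 8 and Google's Python style guide
    if x:
        return x
    return None


def second_function(y):
    # exactly two blank lines separate top-level functions/classes
    return y
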
@@ -229,54 +229,54 @@ opts.add_option('--no-searchbox', action="store_false", dest='searchbox',
options, _ = opts.parse_args()

try:
    try:
        out_file = open(options.out, 'w', errors='ignore')
        src_file = open(options.src, 'r', errors='ignore')
    except TypeError:
        out_file = open(options.out, 'w')
        src_file = open(options.src, 'r')
    last_line = ''
    ignore_next_line = False
    last_title = ''
    for line in src_file:
        if PAT_GERRIT.match(last_line):
            # Case of "GERRIT\n------" at the footer
            out_file.write(GERRIT_UPLINK)
            last_line = ''
        elif PAT_SEARCHBOX.match(last_line):
            # Case of 'SEARCHBOX\n---------'
            if options.searchbox:
                out_file.write(SEARCH_BOX)
            last_line = ''
        elif PAT_INCLUDE.match(line):
            # Case of 'include::<filename>'
            match = PAT_INCLUDE.match(line)
            out_file.write(last_line)
            last_line = match.group(1) + options.suffix + match.group(2) + '\n'
        elif PAT_STARS.match(line):
            if PAT_TITLE.match(last_line):
                # Case of the title in '.<title>\n****\nget::<url>\n****'
                match = PAT_TITLE.match(last_line)
                last_title = GET_TITLE % match.group(1)
            else:
                out_file.write(last_line)
                last_title = ''
        elif PAT_GET.match(line):
            # Case of '****\nget::<url>\n****' in rest api
            url = PAT_GET.match(line).group(1)
            out_file.write(GET_MACRO.format(url) % last_title)
            ignore_next_line = True
        elif ignore_next_line:
            # Handle the trailing '****' of the 'get::' case
            last_line = ''
            ignore_next_line = False
        else:
            out_file.write(last_line)
            last_line = line
    out_file.write(last_line)
    out_file.write(LINK_SCRIPT)
    out_file.close()
except IOError as err:
    sys.stderr.write(
        "error while expanding %s to %s: %s" % (options.src, options.out, err))
    exit(1)
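
For context, a minimal sketch of the include-line rewrite performed in the
loop above. PAT_INCLUDE is defined outside this hunk, so the pattern below is
only a hypothetical stand-in:

import re

# Hypothetical stand-in for the real PAT_INCLUDE defined earlier in the script.
PAT_INCLUDE = re.compile(r"^(include::.*)(\.txt)$")

line = "include::cmd-index.txt"
match = PAT_INCLUDE.match(line)
if match:
    # With --suffix SUFFIX, the included filename gains the suffix before
    # its extension, mirroring last_line = group(1) + options.suffix + group(2).
    rewritten = match.group(1) + "SUFFIX" + match.group(2) + "\n"
    print(rewritten)  # include::cmd-indexSUFFIX.txt
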
@@ -17,6 +17,7 @@ FAILURE_MESSAGE = 'This commit message does not match the standard.' \
PASS_SCORE = '--code-review=0'
PASS_MESSAGE = ''


def main():
    change = None
    project = None

@@ -49,7 +50,7 @@ def main():
        sys.exit(-1)

    if change == None or project == None or branch == None \
            or commit == None or patchset == None:
        usage()
        sys.exit(-1)

@@ -58,7 +59,7 @@ def main():

    if status != 0:
        print('Error running \'%s\'. status: %s, output:\n\n%s' % \
              (command, status, output))
        sys.exit(-1)

    commitMessage = output[(output.find('\n\n')+2):]

@@ -66,7 +67,7 @@ def main():

    if len(commitLines) > 1 and len(commitLines[1]) != 0:
        fail(commit, 'Invalid commit summary. The summary must be ' \
             + 'one line followed by a blank line.')

    i = 0
    for line in commitLines:

@@ -76,23 +77,27 @@ def main():

    passes(commit)


def usage():
    print('Usage:\n')
    print(sys.argv[0] + ' --change <change id> --project <project name> ' \
          + '--branch <branch> --commit <sha1> --patchset <patchset id>')


def fail(commit, message):
    command = SSH_COMMAND + FAILURE_SCORE + ' -m \\\"' \
              + _shell_escape(FAILURE_MESSAGE + '\n\n' + message) \
              + '\\\" ' + commit
    subprocess.getstatusoutput(command)
    sys.exit(1)


def passes(commit):
    command = SSH_COMMAND + PASS_SCORE + ' -m \\\"' \
              + _shell_escape(PASS_MESSAGE) + ' \\\" ' + commit
    subprocess.getstatusoutput(command)


def _shell_escape(x):
    s = ''
    for c in x:

@@ -102,6 +107,6 @@ def _shell_escape(x):
            s = s + c
    return s


if __name__ == '__main__':
    main()
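
For context, a sketch of the review command fail() and passes() assemble.
SSH_COMMAND and the score constants are defined outside this hunk, so the
values below are assumptions:

# Hypothetical stand-ins for constants defined earlier in the hook.
SSH_COMMAND = 'ssh -p 29418 review.example.com gerrit review '
FAILURE_SCORE = '--code-review=-1'

commit = '0123456789abcdef'
command = SSH_COMMAND + FAILURE_SCORE + ' -m \\\"' + 'message' + '\\\" ' + commit
print(command)
# ssh -p 29418 review.example.com gerrit review --code-review=-1 -m \"message\" 0123456789abcdef
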
@@ -47,228 +47,229 @@ HEADERS = {"Content-Type": "application/json", "charset": "UTF-8"}

# Random names from US Census Data
FIRST_NAMES = [
    "Casey", "Yesenia", "Shirley", "Tara", "Wanda", "Sheryl", "Jaime", "Elaine",
    "Charlotte", "Carly", "Bonnie", "Kirsten", "Kathryn", "Carla", "Katrina",
    "Melody", "Suzanne", "Sandy", "Joann", "Kristie", "Sally", "Emma", "Susan",
    "Amanda", "Alyssa", "Patty", "Angie", "Dominique", "Cynthia", "Jennifer",
    "Theresa", "Desiree", "Kaylee", "Maureen", "Jeanne", "Kellie", "Valerie",
    "Nina", "Judy", "Diamond", "Anita", "Rebekah", "Stefanie", "Kendra", "Erin",
    "Tammie", "Tracey", "Bridget", "Krystal", "Jasmin", "Sonia", "Meghan",
    "Rebecca", "Jeanette", "Meredith", "Beverly", "Natasha", "Chloe", "Selena",
    "Teresa", "Sheena", "Cassandra", "Rhonda", "Tami", "Jodi", "Shelly", "Angela",
    "Kimberly", "Terry", "Joanna", "Isabella", "Lindsey", "Loretta", "Dana",
    "Veronica", "Carolyn", "Laura", "Karen", "Dawn", "Alejandra", "Cassie",
    "Lorraine", "Yolanda", "Kerry", "Stephanie", "Caitlin", "Melanie", "Kerri",
    "Doris", "Sandra", "Beth", "Carol", "Vicki", "Shelia", "Bethany", "Rachael",
    "Donna", "Alexandra", "Barbara", "Ana", "Jillian", "Ann", "Rachel", "Lauren",
    "Hayley", "Misty", "Brianna", "Tanya", "Danielle", "Courtney", "Jacqueline",
    "Becky", "Christy", "Alisha", "Phyllis", "Faith", "Jocelyn", "Nancy",
    "Gloria", "Kristen", "Evelyn", "Julie", "Julia", "Kara", "Chelsey", "Cassidy",
    "Jean", "Chelsea", "Jenny", "Diana", "Haley", "Kristine", "Kristina", "Erika",
    "Jenna", "Alison", "Deanna", "Abigail", "Melissa", "Sierra", "Linda",
    "Monica", "Tasha", "Traci", "Yvonne", "Tracy", "Marie", "Maria", "Michaela",
    "Stacie", "April", "Morgan", "Cathy", "Darlene", "Cristina", "Emily",
    "Ian", "Russell", "Phillip", "Jay", "Barry", "Brad", "Frederick", "Fernando",
    "Timothy", "Ricardo", "Bernard", "Daniel", "Ruben", "Alexis", "Kyle", "Malik",
    "Norman", "Kent", "Melvin", "Stephen", "Daryl", "Kurt", "Greg", "Alex",
    "Mario", "Riley", "Marvin", "Dan", "Steven", "Roberto", "Lucas", "Leroy",
    "Preston", "Drew", "Fred", "Casey", "Wesley", "Elijah", "Reginald", "Joel",
    "Christopher", "Jacob", "Luis", "Philip", "Mark", "Rickey", "Todd", "Scott",
    "Terrence", "Jim", "Stanley", "Bobby", "Thomas", "Gabriel", "Tracy", "Marcus",
    "Peter", "Michael", "Calvin", "Herbert", "Darryl", "Billy", "Ross", "Dustin",
    "Jaime", "Adam", "Henry", "Xavier", "Dominic", "Lonnie", "Danny", "Victor",
    "Glen", "Perry", "Jackson", "Grant", "Gerald", "Garrett", "Alejandro",
    "Eddie", "Alan", "Ronnie", "Mathew", "Dave", "Wayne", "Joe", "Craig",
    "Terry", "Chris", "Randall", "Parker", "Francis", "Keith", "Neil", "Caleb",
    "Jon", "Earl", "Taylor", "Bryce", "Brady", "Max", "Sergio", "Leon", "Gene",
    "Darin", "Bill", "Edgar", "Antonio", "Dalton", "Arthur", "Austin", "Cristian",
    "Kevin", "Omar", "Kelly", "Aaron", "Ethan", "Tom", "Isaac", "Maurice",
    "Gilbert", "Hunter", "Willie", "Harry", "Dale", "Darius", "Jerome", "Jason",
    "Harold", "Kerry", "Clarence", "Gregg", "Shane", "Eduardo", "Micheal",
    "Howard", "Vernon", "Rodney", "Anthony", "Levi", "Larry", "Franklin", "Jimmy",
    "Jonathon", "Carl",
]

LAST_NAMES = [
    "Savage", "Hendrix", "Moon", "Larsen", "Rocha", "Burgess", "Bailey", "Farley",
    "Moses", "Schmidt", "Brown", "Hoover", "Klein", "Jennings", "Braun", "Rangel",
    "Casey", "Dougherty", "Hancock", "Wolf", "Henry", "Thomas", "Bentley",
    "Barnett", "Kline", "Pitts", "Rojas", "Sosa", "Paul", "Hess", "Chase",
    "Mckay", "Bender", "Colins", "Montoya", "Townsend", "Potts", "Ayala", "Avery",
    "Sherman", "Tapia", "Hamilton", "Ferguson", "Huang", "Hooper", "Zamora",
    "Logan", "Lloyd", "Quinn", "Monroe", "Brock", "Ibarra", "Fowler", "Weiss",
    "Montgomery", "Diaz", "Dixon", "Olson", "Robertson", "Arias", "Benjamin",
    "Abbott", "Stein", "Schroeder", "Beck", "Velasquez", "Barber", "Nichols",
    "Ortiz", "Burns", "Moody", "Stokes", "Wilcox", "Rush", "Michael", "Kidd",
    "Rowland", "Mclean", "Saunders", "Chung", "Newton", "Potter", "Hickman",
    "Ray", "Larson", "Figueroa", "Duncan", "Sparks", "Rose", "Hodge", "Huynh",
    "Joseph", "Morales", "Beasley", "Mora", "Fry", "Ross", "Novak", "Hahn",
    "Wise", "Knight", "Frederick", "Heath", "Pollard", "Vega", "Mcclain",
    "Buckley", "Conrad", "Cantrell", "Bond", "Mejia", "Wang", "Lewis", "Johns",
    "Mcknight", "Callahan", "Reynolds", "Norris", "Burnett", "Carey", "Jacobson",
    "Oneill", "Oconnor", "Leonard", "Mckenzie", "Hale", "Delgado", "Spence",
    "Brandt", "Obrien", "Bowman", "James", "Avila", "Roberts", "Barker", "Cohen",
    "Bradley", "Prince", "Warren", "Summers", "Little", "Caldwell", "Garrett",
    "Hughes", "Norton", "Burke", "Holden", "Merritt", "Lee", "Frank", "Wiley",
    "Ho", "Weber", "Keith", "Winters", "Gray", "Watts", "Brady", "Aguilar",
    "Nicholson", "David", "Pace", "Cervantes", "Davis", "Baxter", "Sanchez",
    "Singleton", "Taylor", "Strickland", "Glenn", "Valentine", "Roy", "Cameron",
    "Beard", "Norman", "Fritz", "Anthony", "Koch", "Parrish", "Herman", "Hines",
    "Sutton", "Gallegos", "Stephenson", "Lozano", "Franklin", "Howe", "Bauer",
    "Love", "Ali", "Ellison", "Lester", "Guzman", "Jarvis", "Espinoza",
    "Fletcher", "Burton", "Woodard", "Peterson", "Barajas", "Richard", "Bryan",
    "Goodman", "Cline", "Rowe", "Faulkner", "Crawford", "Mueller", "Patterson",
    "Hull", "Walton", "Wu", "Flores", "York", "Dickson", "Barnes", "Fisher",
    "Strong", "Juarez", "Fitzgerald", "Schmitt", "Blevins", "Villa", "Sullivan",
    "Velazquez", "Horton", "Meadows", "Riley", "Barrera", "Neal", "Mendez",
    "Mcdonald", "Floyd", "Lynch", "Mcdowell", "Benson", "Hebert", "Livingston",
    "Davies", "Richardson", "Vincent", "Davenport", "Osborn", "Mckee", "Marshall",
    "Ferrell", "Martinez", "Melton", "Mercer", "Yoder", "Jacobs", "Mcdaniel",
    "Mcmillan", "Peters", "Atkinson", "Wood", "Briggs", "Valencia", "Chandler",
    "Rios", "Hunter", "Bean", "Hicks", "Hays", "Lucero", "Malone", "Waller",
    "Banks", "Myers", "Mitchell", "Grimes", "Houston", "Hampton", "Trujillo",
    "Perkins", "Moran", "Welch", "Contreras", "Montes", "Ayers", "Hayden",
    "Daniel", "Weeks", "Porter", "Gill", "Mullen", "Nolan", "Dorsey", "Crane",
    "Estes", "Lam", "Wells", "Cisneros", "Giles", "Watson", "Vang", "Scott",
    "Knox", "Hanna", "Fields",
]

def clean(json_string):
    # Strip JSON XSS Tag
    json_string = json_string.strip()
    if json_string.startswith(")]}'"):
        return json_string[5:]
    return json_string


def basic_auth(user):
    return requests.auth.HTTPBasicAuth(user["username"], user["http_password"])

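Gerrit prefixes REST API JSON responses with the magic string )]}' to defeat
cross-site script inclusion; clean() strips that prefix (and the newline after
it) before the response is parsed. A minimal usage sketch with a made-up
response body:

raw = ")]}'\n{\"username\": \"jdoe\"}"
print(clean(raw))  # {"username": "jdoe"}
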
def fetch_admin_group():
    global GROUP_ADMIN
    # Get admin group
    r = json.loads(clean(requests.get(BASE_URL + "groups/" + "?suggest=ad&p=All-Projects",
                                      headers=HEADERS,
                                      auth=ADMIN_BASIC_AUTH).text))
    admin_group_name = r.keys()[0]
    GROUP_ADMIN = r[admin_group_name]
    GROUP_ADMIN["name"] = admin_group_name


def generate_random_text():
    return " ".join([random.choice("lorem ipsum "
                                   "doleret delendam "
                                   "\n esse".split(" ")) for _ in range(1, 100)])


def set_up():
    global TMP_PATH
    TMP_PATH = tempfile.mkdtemp()
    atexit.register(clean_up)
    os.makedirs(TMP_PATH + "/ssh")
    os.makedirs(TMP_PATH + "/repos")
    fetch_admin_group()

def get_random_users(num_users):
    users = random.sample([(f, l) for f in FIRST_NAMES for l in LAST_NAMES],
                          num_users)
    names = []
    for u in users:
        names.append({"firstname": u[0],
                      "lastname": u[1],
                      "name": u[0] + " " + u[1],
                      "username": u[0] + u[1],
                      "email": u[0] + "." + u[1] + "@gerritcodereview.com",
                      "http_password": "secret",
                      "groups": []})
    return names

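A sketch of the record shape this helper returns (one hypothetical sample;
the actual names are drawn at random):

# get_random_users(1) might return:
# [{"firstname": "Casey", "lastname": "Savage", "name": "Casey Savage",
#   "username": "CaseySavage",
#   "email": "Casey.Savage@gerritcodereview.com",
#   "http_password": "secret", "groups": []}]
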
def generate_ssh_keys(gerrit_users):
    for user in gerrit_users:
        key_file = TMP_PATH + "/ssh/" + user["username"] + ".key"
        subprocess.check_output(["ssh-keygen", "-f", key_file, "-N", ""])
        with open(key_file + ".pub", "r") as f:
            user["ssh_key"] = f.read()


def create_gerrit_groups():
    groups = [
        {"name": "iOS-Maintainers", "description": "iOS Maintainers",
         "visible_to_all": True, "owner": GROUP_ADMIN["name"],
         "owner_id": GROUP_ADMIN["id"]},
        {"name": "Android-Maintainers", "description": "Android Maintainers",
         "visible_to_all": True, "owner": GROUP_ADMIN["name"],
         "owner_id": GROUP_ADMIN["id"]},
        {"name": "Backend-Maintainers", "description": "Backend Maintainers",
         "visible_to_all": True, "owner": GROUP_ADMIN["name"],
         "owner_id": GROUP_ADMIN["id"]},
        {"name": "Script-Maintainers", "description": "Script Maintainers",
         "visible_to_all": True, "owner": GROUP_ADMIN["name"],
         "owner_id": GROUP_ADMIN["id"]},
        {"name": "Security-Team", "description": "Sec Team",
         "visible_to_all": False, "owner": GROUP_ADMIN["name"],
         "owner_id": GROUP_ADMIN["id"]}]
    for g in groups:
        requests.put(BASE_URL + "groups/" + g["name"],
                     json.dumps(g),
                     headers=HEADERS,
                     auth=ADMIN_BASIC_AUTH)
    return [g["name"] for g in groups]


def create_gerrit_projects(owner_groups):
    projects = [
        {"id": "android", "name": "Android", "parent": "All-Projects",
         "branches": ["master"], "description": "Our android app.",
         "owners": [owner_groups[0]], "create_empty_commit": True},
        {"id": "ios", "name": "iOS", "parent": "All-Projects",
         "branches": ["master"], "description": "Our ios app.",
         "owners": [owner_groups[1]], "create_empty_commit": True},
        {"id": "backend", "name": "Backend", "parent": "All-Projects",
         "branches": ["master"], "description": "Our awesome backend.",
         "owners": [owner_groups[2]], "create_empty_commit": True},
        {"id": "scripts", "name": "Scripts", "parent": "All-Projects",
         "branches": ["master"], "description": "some small scripts.",
         "owners": [owner_groups[3]], "create_empty_commit": True}]
    for p in projects:
        requests.put(BASE_URL + "projects/" + p["name"],
                     json.dumps(p),
                     headers=HEADERS,
                     auth=ADMIN_BASIC_AUTH)
    return [p["name"] for p in projects]


def create_gerrit_users(gerrit_users):
    for user in gerrit_users:
        requests.put(BASE_URL + "accounts/" + user["username"],
                     json.dumps(user),
                     headers=HEADERS,
                     auth=ADMIN_BASIC_AUTH)


def create_change(user, project_name):
    random_commit_message = generate_random_text()
    change = {
        "project": project_name,
        "subject": random_commit_message.split("\n")[0],
        "branch": "master",
        "status": "NEW",
    }
    requests.post(BASE_URL + "changes/",
                  json.dumps(change),
                  headers=HEADERS,
                  auth=basic_auth(user))


def clean_up():
    shutil.rmtree(TMP_PATH)

def main():
    p = optparse.OptionParser()
    p.add_option("-u", "--user_count", action="store",
                 default=100,
                 type='int',
                 help="number of users to generate")
    p.add_option("-p", "--port", action="store",
                 default=8080,
                 type='int',
                 help="port of server")
    (options, _) = p.parse_args()
    global BASE_URL
    BASE_URL = BASE_URL % options.port
    print(BASE_URL)

    set_up()
    gerrit_users = get_random_users(options.user_count)

    group_names = create_gerrit_groups()
    for idx, u in enumerate(gerrit_users):
        u["groups"].append(group_names[idx % len(group_names)])
        if idx % 5 == 0:
            # Also add to security group
            u["groups"].append(group_names[4])

    generate_ssh_keys(gerrit_users)
    create_gerrit_users(gerrit_users)

    project_names = create_gerrit_projects(group_names)

@@ -301,5 +302,40 @@ def main():
    for idx, u in enumerate(gerrit_users):
        for _ in range(random.randint(1, 5)):
            create_change(u, project_names[4 * idx / len(gerrit_users)])


main()
@@ -10,103 +10,109 @@ fnCompiledRegex = re.compile(removeSelfInvokeRegex, re.DOTALL)
regexBehavior = r"<script>(.+)<\/script>"
behaviorCompiledRegex = re.compile(regexBehavior, re.DOTALL)


def _open(filename, mode="r"):
    try:
        return open(filename, mode, encoding="utf-8")
    except TypeError:
        return open(filename, mode)


def replaceBehaviorLikeHTML(fileIn, fileOut):
    with _open(fileIn) as f:
        file_str = f.read()
        match = behaviorCompiledRegex.search(file_str)
        if (match):
            with _open("polygerrit-ui/temp/behaviors/" + fileOut.replace("html", "js"), "w+") as f:
                f.write(match.group(1))


def replaceBehaviorLikeJS(fileIn, fileOut):
    with _open(fileIn) as f:
        file_str = f.read()
    with _open("polygerrit-ui/temp/behaviors/" + fileOut, "w+") as f:
        f.write(file_str)


def generateStubBehavior(behaviorName):
    with _open("polygerrit-ui/temp/behaviors/" + behaviorName + ".js", "w+") as f:
        f.write("/** @polymerBehavior **/\n" + behaviorName + "= {};")


def replacePolymerElement(fileIn, fileOut, root):
    with _open(fileIn) as f:
        key = fileOut.split('.')[0]
        # Removed self invoked function
        file_str = f.read()
        file_str_no_fn = fnCompiledRegex.search(file_str)

        if file_str_no_fn:
            package = root.replace("/", ".") + "." + fileOut

            with _open("polygerrit-ui/temp/" + fileOut, "w+") as f:
                mainFileContents = re.sub(polymerCompiledRegex, "exports = Polymer({", file_str_no_fn.group(1)).replace("'use strict';", "")
                f.write("/** \n" \
                        "* @fileoverview \n" \
                        "* @suppress {missingProperties} \n" \
                        "*/ \n\n" \
                        "goog.module('polygerrit." + package + "')\n\n" + mainFileContents)

            # Add package and javascript to files object.
            elements[key]["js"] = "polygerrit-ui/temp/" + fileOut
            elements[key]["package"] = package


def writeTempFile(file, root):
    # This is included in an extern because it is directly on the window object
    # (for now at least).
    if "gr-reporting" in file:
        return
    key = file.split('.')[0]
    if not key in elements:
        # gr-app doesn't have an additional level
        elements[key] = {"directory": 'gr-app' if len(root.split("/")) < 4 else root.split("/")[3]}
    if file.endswith(".html") and not file.endswith("_test.html"):
        # gr-navigation is treated like a behavior rather than a standard element
        # because of the way it is added to the Gerrit object.
        if file.endswith("gr-navigation.html"):
            replaceBehaviorLikeHTML(os.path.join(root, file), file)
        else:
            elements[key]["html"] = os.path.join(root, file)
    if file.endswith(".js"):
        replacePolymerElement(os.path.join(root, file), file, root)


if __name__ == "__main__":
    # Create temp directory.
    if not os.path.exists("polygerrit-ui/temp"):
        os.makedirs("polygerrit-ui/temp")

    # Within the temp directory, create the behaviors directory.
    if not os.path.exists("polygerrit-ui/temp/behaviors"):
        os.makedirs("polygerrit-ui/temp/behaviors")

    elements = {}

    # Go through every file in app/elements; each is re-written to the temp
    # directory and also added to the elements object, which is used to
    # generate a map of html files, package names, and javascript files.
    for root, dirs, files in os.walk("polygerrit-ui/app/elements"):
        for file in files:
            writeTempFile(file, root)

    # Special case for polymer behaviors we are using.
    replaceBehaviorLikeHTML("polygerrit-ui/app/bower_components/iron-a11y-keys-behavior/iron-a11y-keys-behavior.html", "iron-a11y-keys-behavior.html")
    generateStubBehavior("Polymer.IronOverlayBehavior")
    generateStubBehavior("Polymer.IronFitBehavior")

    # TODO: figure out something to do with iron-overlay-behavior;
    # it is hard-coded reformatted.

    with _open("polygerrit-ui/temp/map.json", "w+") as f:
        f.write(json.dumps(elements))

    for root, dirs, files in os.walk("polygerrit-ui/app/behaviors"):
        for file in files:
            if file.endswith("behavior.html"):
                replaceBehaviorLikeHTML(os.path.join(root, file), file)
            elif file.endswith("behavior.js"):
                replaceBehaviorLikeJS(os.path.join(root, file), file)
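
For context, a sketch of the self-invoking-function unwrapping that
replacePolymerElement performs. removeSelfInvokeRegex is defined above this
hunk, so the pattern below is only a hypothetical stand-in:

import re

# Hypothetical stand-in for the real removeSelfInvokeRegex.
fnCompiledRegex = re.compile(r"\(function\(\) {(.+)}\)\(\);", re.DOTALL)

src = "(function() {\nPolymer({ is: 'gr-foo' });\n})();"
m = fnCompiledRegex.search(src)
if m:
    print(m.group(1))  # the element body, with the IIFE wrapper removed
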
@@ -19,14 +19,16 @@
from __future__ import print_function
import sys


def print_help():
    for (n, v) in vars(sys.modules['__main__']).items():
        if not n.startswith("__") and not n in ['help', 'reload'] \
                and str(type(v)) != "<type 'javapackage'>" \
                and not str(v).startswith("<module"):
            print("\"%s\" is \"%s\"" % (n, v))
    print()
    print("Welcome to the Gerrit Inspector")
    print("Enter help() to see the above again, EOF to quit and stop Gerrit")


print_help()
@@ -25,33 +25,33 @@ graph = defaultdict(list)
handled_rules = []

for xml in args.xmls:
    tree = ET.parse(xml)
    root = tree.getroot()

    for child in root:
        rule_name = child.attrib["name"]
        if rule_name in handled_rules:
            # already handled in other xml files
            continue

        handled_rules.append(rule_name)
        for c in child.getchildren():
            if c.tag != "rule-input":
                continue

            license_name = c.attrib["name"]
            if LICENSE_PREFIX in license_name:
                entries[rule_name].append(license_name)
                graph[license_name].append(rule_name)

if len(graph[DO_NOT_DISTRIBUTE]):
    print("DO_NOT_DISTRIBUTE license found in:", file=stderr)
    for target in graph[DO_NOT_DISTRIBUTE]:
        print(target, file=stderr)
    exit(1)

if args.asciidoctor:
    print(
        # We don't want any blank line before "= Gerrit Code Review - Licenses"
        """= Gerrit Code Review - Licenses

@@ -93,39 +93,39 @@ updates of mirror servers, or realtime backups.
""")

for n in sorted(graph.keys()):
    if len(graph[n]) == 0:
        continue

    name = n[len(LICENSE_PREFIX):]
    safename = name.replace(".", "_")
    print()
    print("[[%s]]" % safename)
    print(name)
    print()
    for d in sorted(graph[n]):
        if d.startswith("//lib:") or d.startswith("//lib/"):
            p = d[len("//lib:"):]
        else:
            p = d[d.index(":")+1:].lower()
        if "__" in p:
            p = p[:p.index("__")]
        print("* " + p)
    print()
    print("[[%s_license]]" % safename)
    print("----")
    filename = n[2:].replace(":", "/")
    try:
        with open(filename, errors='ignore') as fd:
            copyfileobj(fd, stdout)
    except TypeError:
        with open(filename) as fd:
            copyfileobj(fd, stdout)
    print()
    print("----")
    print()

if args.asciidoctor:
    print(
        """
GERRIT
------
@@ -30,49 +30,50 @@ LOCAL_PROPERTIES = 'local.properties'


def safe_mkdirs(d):
    if path.isdir(d):
        return
    try:
        makedirs(d)
    except OSError as err:
        if not path.isdir(d):
            raise err


def download_properties(root_dir):
    """ Get the download properties.

    First tries to find the properties file in the given root directory,
    and if not found there, tries in the Gerrit settings folder in the
    user's home directory.

    Returns a set of download properties, which may be empty.

    """
    p = {}
    local_prop = path.join(root_dir, LOCAL_PROPERTIES)
    if not path.isfile(local_prop):
        local_prop = path.join(GERRIT_HOME, LOCAL_PROPERTIES)
    if path.isfile(local_prop):
        try:
            with open(local_prop) as fd:
                for line in fd:
                    if line.startswith('download.'):
                        d = [e.strip() for e in line.split('=', 1)]
                        name, url = d[0], d[1]
                        p[name[len('download.'):]] = url
        except OSError:
            pass
    return p


def cache_entry(args):
    if args.v:
        h = args.v
    else:
        h = sha1(args.u.encode('utf-8')).hexdigest()
    name = '%s-%s' % (path.basename(args.o), h)
    return path.join(CACHE_DIR, name)


opts = OptionParser()
opts.add_option('-o', help='local output file')
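
A sketch of the cache naming scheme cache_entry implements. CACHE_DIR is
defined outside this hunk, so its value below is an assumption:

from hashlib import sha1
from os import path

CACHE_DIR = '/tmp/gerrit-cache'  # hypothetical; the real value is set above


class Args(object):
    o = 'lib/guava/guava.jar'           # local output file
    u = 'http://example.com/guava.jar'  # source URL
    v = None                            # optional expected sha1 of the artifact


args = Args()
h = args.v if args.v else sha1(args.u.encode('utf-8')).hexdigest()
print(path.join(CACHE_DIR, '%s-%s' % (path.basename(args.o), h)))
# /tmp/gerrit-cache/guava.jar-<sha1 of the source URL>
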
@@ -85,89 +86,89 @@ args, _ = opts.parse_args()

root_dir = args.o
while root_dir and path.dirname(root_dir) != root_dir:
    root_dir, n = path.split(root_dir)
    if n == 'WORKSPACE':
        break

redirects = download_properties(root_dir)
cache_ent = cache_entry(args)
src_url = resolve_url(args.u, redirects)

if not path.exists(cache_ent):
    try:
        safe_mkdirs(path.dirname(cache_ent))
    except OSError as err:
        print('error creating directory %s: %s' %
              (path.dirname(cache_ent), err), file=stderr)
        exit(1)

    print('Download %s' % src_url, file=stderr)
    try:
        check_call(['curl', '--proxy-anyauth', '-ksSfLo', cache_ent, src_url])
    except OSError as err:
        print('could not invoke curl: %s\nis curl installed?' % err, file=stderr)
        exit(1)
    except CalledProcessError as err:
        print('error using curl: %s' % err, file=stderr)
        exit(1)

if args.v:
    have = hash_file(sha1(), cache_ent).hexdigest()
    if args.v != have:
        print((
            '%s:\n' +
            'expected %s\n' +
            'received %s\n') % (src_url, args.v, have), file=stderr)
        try:
            remove(cache_ent)
        except OSError as err:
            if path.exists(cache_ent):
                print('error removing %s: %s' % (cache_ent, err), file=stderr)
        exit(1)

exclude = []
if args.x:
    exclude += args.x
if args.exclude_java_sources:
    try:
        with ZipFile(cache_ent, 'r') as zf:
            for n in zf.namelist():
                if n.endswith('.java'):
                    exclude.append(n)
    except (BadZipfile, LargeZipFile) as err:
        print('error opening %s: %s' % (cache_ent, err), file=stderr)
        exit(1)

if args.unsign:
    try:
        with ZipFile(cache_ent, 'r') as zf:
            for n in zf.namelist():
                if (n.endswith('.RSA')
                        or n.endswith('.SF')
                        or n.endswith('.LIST')):
                    exclude.append(n)
    except (BadZipfile, LargeZipFile) as err:
        print('error opening %s: %s' % (cache_ent, err), file=stderr)
        exit(1)

safe_mkdirs(path.dirname(args.o))
if exclude:
    try:
        shutil.copyfile(cache_ent, args.o)
    except (shutil.Error, IOError) as err:
        print('error copying to %s: %s' % (args.o, err), file=stderr)
        exit(1)
    try:
        check_call(['zip', '-d', args.o] + exclude)
    except CalledProcessError as err:
        print('error removing files from zip: %s' % err, file=stderr)
        exit(1)
else:
    try:
        link(cache_ent, args.o)
    except OSError as err:
        try:
            shutil.copyfile(cache_ent, args.o)
        except (shutil.Error, IOError) as err:
            print('error copying to %s: %s' % (args.o, err), file=stderr)
            exit(1)
@@ -30,20 +30,20 @@ MAIN = '//tools/eclipse:classpath'
GWT = '//gerrit-gwtui:ui_module'
AUTO = '//lib/auto:auto-value'
JRE = '/'.join([
    'org.eclipse.jdt.launching.JRE_CONTAINER',
    'org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType',
    'JavaSE-1.8',
])
# Map of targets to corresponding classpath collector rules
cp_targets = {
    AUTO: '//tools/eclipse:autovalue_classpath_collect',
    GWT: '//tools/eclipse:gwt_classpath_collect',
    MAIN: '//tools/eclipse:main_classpath_collect',
}

ROOT = path.abspath(__file__)
while not path.exists(path.join(ROOT, 'WORKSPACE')):
    ROOT = path.dirname(ROOT)

opts = OptionParser()
opts.add_option('--plugins', help='create eclipse projects for plugins',
@ -56,38 +56,43 @@ args, _ = opts.parse_args()
|
|||||||
|
|
||||||
batch_option = '--batch' if args.batch else None
|
batch_option = '--batch' if args.batch else None
|
||||||
|
|
||||||
|
|
||||||
def _build_bazel_cmd(*args):
|
def _build_bazel_cmd(*args):
|
||||||
cmd = ['bazel']
|
cmd = ['bazel']
|
||||||
if batch_option:
|
if batch_option:
|
||||||
cmd.append('--batch')
|
cmd.append('--batch')
|
||||||
for arg in args:
|
for arg in args:
|
||||||
cmd.append(arg)
|
cmd.append(arg)
|
||||||
return cmd
|
return cmd
|
||||||
|
|
||||||
|
|
||||||
def retrieve_ext_location():
    return check_output(_build_bazel_cmd('info', 'output_base')).strip()


def gen_bazel_path():
    bazel = check_output(['which', 'bazel']).strip().decode('UTF-8')
    with open(path.join(ROOT, ".bazel_path"), 'w') as fd:
        fd.write("bazel=%s\n" % bazel)
        fd.write("PATH=%s\n" % environ["PATH"])

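gen_bazel_path records which bazel binary and PATH Eclipse should use; the generated .bazel_path file looks like this (values are machine-specific, these are made up):

bazel=/usr/local/bin/bazel
PATH=/usr/local/bin:/usr/bin:/bin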
def _query_classpath(target):
    deps = []
    t = cp_targets[target]
    try:
        check_call(_build_bazel_cmd('build', t))
    except CalledProcessError:
        exit(1)
    name = 'bazel-bin/tools/eclipse/' + t.split(':')[1] + '.runtime_classpath'
    deps = [line.rstrip('\n') for line in open(name)]
    return deps

def gen_project(name='gerrit', root=ROOT):
    p = path.join(root, '.project')
    with open(p, 'w') as fd:
        print("""\
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
  <name>%(name)s</name>

@ -102,16 +107,17 @@ def gen_project(name='gerrit', root=ROOT):

</projectDescription>\
""" % {"name": name}, file=fd)

def gen_plugin_classpath(root):
    p = path.join(root, '.classpath')
    with open(p, 'w') as fd:
        if path.exists(path.join(root, 'src', 'test', 'java')):
            testpath = """
  <classpathentry excluding="**/BUILD" kind="src" path="src/test/java"\
      out="eclipse-out/test"/>"""
        else:
            testpath = ""
        print("""\
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
  <classpathentry excluding="**/BUILD" kind="src" path="src/main/java"/>%(testpath)s

@ -120,186 +126,189 @@ def gen_plugin_classpath(root):

<classpathentry kind="output" path="eclipse-out/classes"/>
|
<classpathentry kind="output" path="eclipse-out/classes"/>
|
||||||
</classpath>""" % {"testpath": testpath}, file=fd)
|
</classpath>""" % {"testpath": testpath}, file=fd)
|
||||||
|
|
||||||
|
|
||||||
def gen_classpath(ext):
|
def gen_classpath(ext):
|
||||||
def make_classpath():
|
def make_classpath():
|
||||||
impl = minidom.getDOMImplementation()
|
impl = minidom.getDOMImplementation()
|
||||||
return impl.createDocument(None, 'classpath', None)
|
return impl.createDocument(None, 'classpath', None)
|
||||||
|
|
||||||
def classpathentry(kind, path, src=None, out=None, exported=None):
|
def classpathentry(kind, path, src=None, out=None, exported=None):
|
||||||
e = doc.createElement('classpathentry')
|
e = doc.createElement('classpathentry')
|
||||||
e.setAttribute('kind', kind)
|
e.setAttribute('kind', kind)
|
||||||
# TODO(davido): Remove this and other exclude BUILD files hack
|
# TODO(davido): Remove this and other exclude BUILD files hack
|
||||||
# when this Bazel bug is fixed:
|
# when this Bazel bug is fixed:
|
||||||
# https://github.com/bazelbuild/bazel/issues/1083
|
# https://github.com/bazelbuild/bazel/issues/1083
|
||||||
if kind == 'src':
|
if kind == 'src':
|
||||||
e.setAttribute('excluding', '**/BUILD')
|
e.setAttribute('excluding', '**/BUILD')
|
||||||
e.setAttribute('path', path)
|
e.setAttribute('path', path)
|
||||||
if src:
|
if src:
|
||||||
e.setAttribute('sourcepath', src)
|
e.setAttribute('sourcepath', src)
|
||||||
if out:
|
if out:
|
||||||
e.setAttribute('output', out)
|
e.setAttribute('output', out)
|
||||||
if exported:
|
if exported:
|
||||||
e.setAttribute('exported', 'true')
|
e.setAttribute('exported', 'true')
|
||||||
doc.documentElement.appendChild(e)
|
doc.documentElement.appendChild(e)
|
||||||
|
|
||||||
doc = make_classpath()
|
doc = make_classpath()
|
||||||
src = set()
|
src = set()
|
||||||
lib = set()
|
lib = set()
|
||||||
proto = set()
|
proto = set()
|
||||||
gwt_src = set()
|
gwt_src = set()
|
||||||
gwt_lib = set()
|
gwt_lib = set()
|
||||||
plugins = set()
|
plugins = set()
|
||||||
|
|
||||||
# Classpath entries are absolute for cross-cell support
|
# Classpath entries are absolute for cross-cell support
|
||||||
java_library = re.compile('bazel-out/.*?-fastbuild/bin/(.*)/[^/]+[.]jar$')
|
java_library = re.compile('bazel-out/.*?-fastbuild/bin/(.*)/[^/]+[.]jar$')
|
||||||
srcs = re.compile('(.*/external/[^/]+)/jar/(.*)[.]jar')
|
srcs = re.compile('(.*/external/[^/]+)/jar/(.*)[.]jar')
|
||||||
for p in _query_classpath(MAIN):
|
for p in _query_classpath(MAIN):
|
||||||
if p.endswith('-src.jar'):
|
if p.endswith('-src.jar'):
|
||||||
# gwt_module() depends on -src.jar for Java to JavaScript compiles.
|
# gwt_module() depends on -src.jar for Java to JavaScript compiles.
|
||||||
if p.startswith("external"):
|
if p.startswith("external"):
|
||||||
p = path.join(ext, p)
|
p = path.join(ext, p)
|
||||||
gwt_lib.add(p)
|
gwt_lib.add(p)
|
||||||
continue
|
|
||||||
|
|
||||||
m = java_library.match(p)
|
|
||||||
if m:
|
|
||||||
src.add(m.group(1))
|
|
||||||
# Exceptions: both source and lib
|
|
||||||
if p.endswith('libquery_parser.jar') or \
|
|
||||||
p.endswith('libgerrit-prolog-common.jar'):
|
|
||||||
lib.add(p)
|
|
||||||
# JGit dependency from external repository
|
|
||||||
if 'gerrit-' not in p and 'jgit' in p:
|
|
||||||
lib.add(p)
|
|
||||||
# Assume any jars in /proto/ are from java_proto_library rules
|
|
||||||
if '/bin/proto/' in p:
|
|
||||||
proto.add(p)
|
|
||||||
else:
|
|
||||||
# Don't mess up with Bazel internal test runner dependencies.
|
|
||||||
# When we use Eclipse we rely on it for running the tests
|
|
||||||
if p.endswith("external/bazel_tools/tools/jdk/TestRunner_deploy.jar"):
|
|
||||||
continue
|
|
||||||
if p.startswith("external"):
|
|
||||||
p = path.join(ext, p)
|
|
||||||
lib.add(p)
|
|
||||||
|
|
||||||
for p in _query_classpath(GWT):
|
|
||||||
m = java_library.match(p)
|
|
||||||
if m:
|
|
||||||
gwt_src.add(m.group(1))
|
|
||||||
|
|
||||||
classpathentry('src', 'java')
|
|
||||||
classpathentry('src', 'javatests', out='eclipse-out/test')
|
|
||||||
classpathentry('src', 'resources')
|
|
||||||
for s in sorted(src):
|
|
||||||
out = None
|
|
||||||
|
|
||||||
if s.startswith('lib/'):
|
|
||||||
out = 'eclipse-out/lib'
|
|
||||||
elif s.startswith('plugins/'):
|
|
||||||
if args.plugins:
|
|
||||||
plugins.add(s)
|
|
||||||
continue
|
|
||||||
out = 'eclipse-out/' + s
|
|
||||||
|
|
||||||
p = path.join(s, 'java')
|
|
||||||
if path.exists(p):
|
|
||||||
classpathentry('src', p, out=out)
|
|
||||||
continue
|
|
||||||
|
|
||||||
for env in ['main', 'test']:
|
|
||||||
o = None
|
|
||||||
if out:
|
|
||||||
o = out + '/' + env
|
|
||||||
elif env == 'test':
|
|
||||||
o = 'eclipse-out/test'
|
|
||||||
|
|
||||||
for srctype in ['java', 'resources']:
|
|
||||||
p = path.join(s, 'src', env, srctype)
|
|
||||||
if path.exists(p):
|
|
||||||
classpathentry('src', p, out=o)
|
|
||||||
|
|
||||||
for libs in [lib, gwt_lib]:
|
|
||||||
for j in sorted(libs):
|
|
||||||
s = None
|
|
||||||
m = srcs.match(j)
|
|
||||||
if m:
|
|
||||||
prefix = m.group(1)
|
|
||||||
suffix = m.group(2)
|
|
||||||
p = path.join(prefix, "jar", "%s-src.jar" % suffix)
|
|
||||||
if path.exists(p):
|
|
||||||
s = p
|
|
||||||
if args.plugins:
|
|
||||||
classpathentry('lib', j, s, exported=True)
|
|
||||||
else:
|
|
||||||
# Filter out the source JARs that we pull through transitive closure of
|
|
||||||
# GWT plugin API (we add source directories themself). Exception is
|
|
||||||
# libEdit-src.jar, that is needed for GWT SDM to work.
|
|
||||||
m = java_library.match(j)
|
|
||||||
if m:
|
|
||||||
if m.group(1).startswith("gerrit-") and \
|
|
||||||
j.endswith("-src.jar") and \
|
|
||||||
not j.endswith("libEdit-src.jar"):
|
|
||||||
continue
|
continue
|
||||||
classpathentry('lib', j, s)
|
|
||||||
|
|
||||||
for p in sorted(proto):
|
m = java_library.match(p)
|
||||||
s = p.replace('-fastbuild/bin/proto/lib', '-fastbuild/genfiles/proto/')
|
if m:
|
||||||
s = s.replace('.jar', '-src.jar')
|
src.add(m.group(1))
|
||||||
classpathentry('lib', p, s)
|
# Exceptions: both source and lib
|
||||||
|
if p.endswith('libquery_parser.jar') or \
|
||||||
|
p.endswith('libgerrit-prolog-common.jar'):
|
||||||
|
lib.add(p)
|
||||||
|
# JGit dependency from external repository
|
||||||
|
if 'gerrit-' not in p and 'jgit' in p:
|
||||||
|
lib.add(p)
|
||||||
|
# Assume any jars in /proto/ are from java_proto_library rules
|
||||||
|
if '/bin/proto/' in p:
|
||||||
|
proto.add(p)
|
||||||
|
else:
|
||||||
|
# Don't mess up with Bazel internal test runner dependencies.
|
||||||
|
# When we use Eclipse we rely on it for running the tests
|
||||||
|
if p.endswith("external/bazel_tools/tools/jdk/TestRunner_deploy.jar"):
|
||||||
|
continue
|
||||||
|
if p.startswith("external"):
|
||||||
|
p = path.join(ext, p)
|
||||||
|
lib.add(p)
|
||||||
|
|
||||||
for s in sorted(gwt_src):
|
for p in _query_classpath(GWT):
|
||||||
p = path.join(ROOT, s, 'src', 'main', 'java')
|
m = java_library.match(p)
|
||||||
if path.exists(p):
|
if m:
|
||||||
classpathentry('lib', p, out='eclipse-out/gwtsrc')
|
gwt_src.add(m.group(1))
|
||||||
|
|
||||||
classpathentry('con', JRE)
|
classpathentry('src', 'java')
|
||||||
classpathentry('output', 'eclipse-out/classes')
|
classpathentry('src', 'javatests', out='eclipse-out/test')
|
||||||
|
classpathentry('src', 'resources')
|
||||||
|
for s in sorted(src):
|
||||||
|
out = None
|
||||||
|
|
||||||
p = path.join(ROOT, '.classpath')
|
if s.startswith('lib/'):
|
||||||
with open(p, 'w') as fd:
|
out = 'eclipse-out/lib'
|
||||||
doc.writexml(fd, addindent='\t', newl='\n', encoding='UTF-8')
|
elif s.startswith('plugins/'):
|
||||||
|
if args.plugins:
|
||||||
|
plugins.add(s)
|
||||||
|
continue
|
||||||
|
out = 'eclipse-out/' + s
|
||||||
|
|
||||||
|
p = path.join(s, 'java')
|
||||||
|
if path.exists(p):
|
||||||
|
classpathentry('src', p, out=out)
|
||||||
|
continue
|
||||||
|
|
||||||
|
for env in ['main', 'test']:
|
||||||
|
o = None
|
||||||
|
if out:
|
||||||
|
o = out + '/' + env
|
||||||
|
elif env == 'test':
|
||||||
|
o = 'eclipse-out/test'
|
||||||
|
|
||||||
|
for srctype in ['java', 'resources']:
|
||||||
|
p = path.join(s, 'src', env, srctype)
|
||||||
|
if path.exists(p):
|
||||||
|
classpathentry('src', p, out=o)
|
||||||
|
|
||||||
|
for libs in [lib, gwt_lib]:
|
||||||
|
for j in sorted(libs):
|
||||||
|
s = None
|
||||||
|
m = srcs.match(j)
|
||||||
|
if m:
|
||||||
|
prefix = m.group(1)
|
||||||
|
suffix = m.group(2)
|
||||||
|
p = path.join(prefix, "jar", "%s-src.jar" % suffix)
|
||||||
|
if path.exists(p):
|
||||||
|
s = p
|
||||||
|
if args.plugins:
|
||||||
|
classpathentry('lib', j, s, exported=True)
|
||||||
|
else:
|
||||||
|
# Filter out the source JARs that we pull through transitive closure of
|
||||||
|
# GWT plugin API (we add source directories themself). Exception is
|
||||||
|
# libEdit-src.jar, that is needed for GWT SDM to work.
|
||||||
|
m = java_library.match(j)
|
||||||
|
if m:
|
||||||
|
if m.group(1).startswith("gerrit-") and \
|
||||||
|
j.endswith("-src.jar") and \
|
||||||
|
not j.endswith("libEdit-src.jar"):
|
||||||
|
continue
|
||||||
|
classpathentry('lib', j, s)
|
||||||
|
|
||||||
|
for p in sorted(proto):
|
||||||
|
s = p.replace('-fastbuild/bin/proto/lib', '-fastbuild/genfiles/proto/')
|
||||||
|
s = s.replace('.jar', '-src.jar')
|
||||||
|
classpathentry('lib', p, s)
|
||||||
|
|
||||||
|
for s in sorted(gwt_src):
|
||||||
|
p = path.join(ROOT, s, 'src', 'main', 'java')
|
||||||
|
if path.exists(p):
|
||||||
|
classpathentry('lib', p, out='eclipse-out/gwtsrc')
|
||||||
|
|
||||||
|
classpathentry('con', JRE)
|
||||||
|
classpathentry('output', 'eclipse-out/classes')
|
||||||
|
|
||||||
|
p = path.join(ROOT, '.classpath')
|
||||||
|
with open(p, 'w') as fd:
|
||||||
|
doc.writexml(fd, addindent='\t', newl='\n', encoding='UTF-8')
|
||||||
|
|
||||||
|
if args.plugins:
|
||||||
|
for plugin in plugins:
|
||||||
|
plugindir = path.join(ROOT, plugin)
|
||||||
|
try:
|
||||||
|
gen_project(plugin.replace('plugins/', ""), plugindir)
|
||||||
|
gen_plugin_classpath(plugindir)
|
||||||
|
except (IOError, OSError) as err:
|
||||||
|
print('error generating project for %s: %s' % (plugin, err),
|
||||||
|
file=sys.stderr)
|
||||||
|
|
||||||
if args.plugins:
|
|
||||||
for plugin in plugins:
|
|
||||||
plugindir = path.join(ROOT, plugin)
|
|
||||||
try:
|
|
||||||
gen_project(plugin.replace('plugins/', ""), plugindir)
|
|
||||||
gen_plugin_classpath(plugindir)
|
|
||||||
except (IOError, OSError) as err:
|
|
||||||
print('error generating project for %s: %s' % (plugin, err),
|
|
||||||
file=sys.stderr)
|
|
||||||
|
|
||||||
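For orientation, a call such as classpathentry('src', 'java', out='eclipse-out/classes') appends an element of roughly this shape to the generated .classpath (attribute order as minidom happens to emit it may differ):

<classpathentry kind="src" excluding="**/BUILD" path="java" output="eclipse-out/classes"/>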
def gen_factorypath(ext):
    doc = minidom.getDOMImplementation().createDocument(None, 'factorypath', None)
    for jar in _query_classpath(AUTO):
        e = doc.createElement('factorypathentry')
        e.setAttribute('kind', 'EXTJAR')
        e.setAttribute('id', path.join(ext, jar))
        e.setAttribute('enabled', 'true')
        e.setAttribute('runInBatchMode', 'false')
        doc.documentElement.appendChild(e)

    p = path.join(ROOT, '.factorypath')
    with open(p, 'w') as fd:
        doc.writexml(fd, addindent='\t', newl='\n', encoding='UTF-8')

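The resulting .factorypath registers the AutoValue jars as Eclipse annotation processors; a trimmed, illustrative example (the jar path is made up):

<?xml version="1.0" encoding="UTF-8"?>
<factorypath>
	<factorypathentry kind="EXTJAR" id="/path/to/output_base/external/auto_value/jar/auto-value.jar" enabled="true" runInBatchMode="false"/>
</factorypath>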
try:
    ext_location = retrieve_ext_location().decode("utf-8")
    gen_project(args.project_name)
    gen_classpath(ext_location)
    gen_factorypath(ext_location)
    gen_bazel_path()

    # TODO(davido): Remove this when GWT gone
    gwt_working_dir = ".gwt_work_dir"
    if not path.isdir(gwt_working_dir):
        makedirs(path.join(ROOT, gwt_working_dir))

    try:
        check_call(_build_bazel_cmd('build', MAIN, GWT, '//java/org/eclipse/jgit:libEdit-src.jar'))
    except CalledProcessError:
        exit(1)
except KeyboardInterrupt:
    print('Interrupted by user', file=sys.stderr)
    exit(1)
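Taken together, this entry point is normally run from the workspace root; an illustrative invocation (assuming the script lives at tools/eclipse/project.py, as the //tools/eclipse labels suggest):

python tools/eclipse/project.py --plugins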

@ -33,137 +33,139 @@ import bowerutil

# List of licenses for packages that don't specify one in their bower.json file.
package_licenses = {
    "codemirror-minified": "codemirror-minified",
    "es6-promise": "es6-promise",
    "fetch": "fetch",
    "font-roboto": "polymer",
    "iron-a11y-announcer": "polymer",
    "iron-a11y-keys-behavior": "polymer",
    "iron-autogrow-textarea": "polymer",
    "iron-behaviors": "polymer",
    "iron-dropdown": "polymer",
    "iron-fit-behavior": "polymer",
    "iron-flex-layout": "polymer",
    "iron-form-element-behavior": "polymer",
    "iron-icon": "polymer",
    "iron-iconset-svg": "polymer",
    "iron-input": "polymer",
    "iron-menu-behavior": "polymer",
    "iron-meta": "polymer",
    "iron-overlay-behavior": "polymer",
    "iron-resizable-behavior": "polymer",
    "iron-selector": "polymer",
    "iron-validatable-behavior": "polymer",
    "moment": "moment",
    "neon-animation": "polymer",
    "page": "page.js",
    "paper-button": "polymer",
    "paper-icon-button": "polymer",
    "paper-input": "polymer",
    "paper-item": "polymer",
    "paper-listbox": "polymer",
    "paper-toggle-button": "polymer",
    "paper-styles": "polymer",
    "paper-tabs": "polymer",
    "polymer": "polymer",
    "polymer-resin": "polymer",
    "promise-polyfill": "promise-polyfill",
    "web-animations-js": "Apache2.0",
    "webcomponentsjs": "polymer",
    "paper-material": "polymer",
    "paper-behaviors": "polymer",
    "paper-ripple": "polymer",
    "iron-checked-element-behavior": "polymer",
}

def build_bower_json(version_targets, seeds):
    """Generate a bower.json file and return its path.

    Args:
      version_targets: bazel target names of the versions.json file.
      seeds: an iterable of bower package names of the seed packages, i.e.
        the packages whose versions we control manually.
    """
    bower_json = collections.OrderedDict()
    bower_json['name'] = 'bower2bazel-output'
    bower_json['version'] = '0.0.0'
    bower_json['description'] = 'Auto-generated bower.json for dependency management'
    bower_json['private'] = True
    bower_json['dependencies'] = {}

    seeds = set(seeds)
    for v in version_targets:
        path = os.path.join("bazel-out/*-fastbuild/bin", v.lstrip("/").replace(":", "/"))
        fs = glob.glob(path)
        assert len(fs) == 1, '%s: file not found or multiple files found: %s' % (path, fs)
        with open(fs[0]) as f:
            j = json.load(f)
            if "" in j:
                # drop dummy entries.
                del j[""]

            trimmed = {}
            for k, v in j.items():
                if k in seeds:
                    trimmed[k] = v

            bower_json['dependencies'].update(trimmed)

    tmpdir = tempfile.mkdtemp()
    ret = os.path.join(tmpdir, 'bower.json')
    with open(ret, 'w') as f:
        json.dump(bower_json, f, indent=2)
    return ret

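The generated bower.json then looks roughly like this (the dependency set and versions are illustrative):

{
  "name": "bower2bazel-output",
  "version": "0.0.0",
  "description": "Auto-generated bower.json for dependency management",
  "private": true,
  "dependencies": {
    "moment": "2.x",
    "polymer": "1.x"
  }
}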
def decode(input):
    try:
        return input.decode("utf-8")
    except TypeError:
        return input

def bower_command(args):
    base = subprocess.check_output(["bazel", "info", "output_base"]).strip()
    exp = os.path.join(decode(base), "external", "bower", "*npm_binary.tgz")
    fs = sorted(glob.glob(exp))
    assert len(fs) == 1, "bower tarball not found or multiple versions found: %s" % fs
    return ["python", os.getcwd() + "/tools/js/run_npm_binary.py", sorted(fs)[0]] + args

def main(args):
    opts = optparse.OptionParser()
    opts.add_option('-w', help='.bzl output for WORKSPACE')
    opts.add_option('-b', help='.bzl output for //lib:BUILD')
    opts, args = opts.parse_args()

    target_str = subprocess.check_output([
        "bazel", "query", "kind(bower_component_bundle, //polygerrit-ui/...)"])
    seed_str = subprocess.check_output([
        "bazel", "query", "attr(seed, 1, kind(bower_component, deps(//polygerrit-ui/...)))"])
    targets = [s for s in decode(target_str).split('\n') if s]
    seeds = [s for s in decode(seed_str).split('\n') if s]
    prefix = "//lib/js:"
    non_seeds = [s for s in seeds if not s.startswith(prefix)]
    assert not non_seeds, non_seeds
    seeds = set([s[len(prefix):] for s in seeds])

    version_targets = [t + "-versions.json" for t in targets]
    subprocess.check_call(['bazel', 'build'] + version_targets)
    bower_json_path = build_bower_json(version_targets, seeds)
    dir = os.path.dirname(bower_json_path)
    cmd = bower_command(["install"])

    build_out = sys.stdout
    if opts.b:
        build_out = open(opts.b + ".tmp", 'w')

    ws_out = sys.stdout
    if opts.w:
        ws_out = open(opts.w + ".tmp", 'w')

    header = """# DO NOT EDIT
# generated with the following command:
#
# %s

@ -171,30 +173,30 @@ def main(args):

|
|
||||||
""" % ' '.join(sys.argv)
|
""" % ' '.join(sys.argv)
|
||||||
|
|
||||||
ws_out.write(header)
|
ws_out.write(header)
|
||||||
build_out.write(header)
|
build_out.write(header)
|
||||||
|
|
||||||
oldwd = os.getcwd()
|
oldwd = os.getcwd()
|
||||||
os.chdir(dir)
|
os.chdir(dir)
|
||||||
subprocess.check_call(cmd)
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
interpret_bower_json(seeds, ws_out, build_out)
|
interpret_bower_json(seeds, ws_out, build_out)
|
||||||
ws_out.close()
|
ws_out.close()
|
||||||
build_out.close()
|
build_out.close()
|
||||||
|
|
||||||
os.chdir(oldwd)
|
os.chdir(oldwd)
|
||||||
os.rename(opts.w + ".tmp", opts.w)
|
os.rename(opts.w + ".tmp", opts.w)
|
||||||
os.rename(opts.b + ".tmp", opts.b)
|
os.rename(opts.b + ".tmp", opts.b)
|
||||||
|
|
||||||
|
|
||||||
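The write-to-.tmp-then-os.rename dance above is the usual way to avoid leaving a half-written output behind; a minimal standalone sketch of the same pattern:

import os


def atomic_write(path, text):
    tmp = path + ".tmp"
    with open(tmp, "w") as f:
        f.write(text)
    os.rename(tmp, path)  # atomic replace on POSIX within one filesystem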
def dump_workspace(data, seeds, out):
    out.write('load("//tools/bzl:js.bzl", "bower_archive")\n\n')
    out.write('def load_bower_archives():\n')

    for d in data:
        if d["name"] in seeds:
            continue
        out.write("""  bower_archive(
    name = "%(name)s",
    package = "%(normalized-name)s",
    version = "%(version)s",

@ -203,48 +205,48 @@ def dump_workspace(data, seeds, out):

def dump_build(data, seeds, out):
    out.write('load("//tools/bzl:js.bzl", "bower_component")\n\n')
    out.write('def define_bower_components():\n')
    for d in data:
        out.write("  bower_component(\n")
        out.write("    name = \"%s\",\n" % d["name"])
        out.write("    license = \"//lib:LICENSE-%s\",\n" % d["bazel-license"])
        deps = sorted(d.get("dependencies", {}).keys())
        if deps:
            if len(deps) == 1:
                out.write("    deps = [ \":%s\" ],\n" % deps[0])
            else:
                out.write("    deps = [\n")
                for dep in deps:
                    out.write("      \":%s\",\n" % dep)
                out.write("    ],\n")
        if d["name"] in seeds:
            out.write("    seed = True,\n")
        out.write("  )\n")
    # done

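dump_build therefore emits Starlark along these lines (a single, illustrative component):

load("//tools/bzl:js.bzl", "bower_component")

def define_bower_components():
  bower_component(
    name = "page",
    license = "//lib:LICENSE-page.js",
    seed = True,
  )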
def interpret_bower_json(seeds, ws_out, build_out):
    out = subprocess.check_output(["find", "bower_components/", "-name", ".bower.json"])

    data = []
    for f in sorted(decode(out).split('\n')):
        if not f:
            continue
        pkg = json.load(open(f))
        pkg_name = pkg["name"]

        pkg["bazel-sha1"] = bowerutil.hash_bower_component(
            hashlib.sha1(), os.path.dirname(f)).hexdigest()
        license = package_licenses.get(pkg_name, "DO_NOT_DISTRIBUTE")

        pkg["bazel-license"] = license
        pkg["normalized-name"] = pkg["_originalSource"]
        data.append(pkg)

    dump_workspace(data, seeds, ws_out)
    dump_build(data, seeds, build_out)


if __name__ == '__main__':
    main(sys.argv[1:])

@ -16,31 +16,31 @@ import os

def hash_bower_component(hash_obj, path):
    """Hash the contents of a bower component directory.

    This is a stable hash of a directory downloaded with `bower install`, minus
    the .bower.json file, which is autogenerated each time by bower. Used in lieu
    of hashing a zipfile of the contents, since zipfiles are difficult to hash in
    a stable manner.

    Args:
      hash_obj: an open hash object, e.g. hashlib.sha1().
      path: path to the directory to hash.

    Returns:
      The passed-in hash_obj.
    """
    if not os.path.isdir(path):
        raise ValueError('Not a directory: %s' % path)

    path = os.path.abspath(path)
    for root, dirs, files in os.walk(path):
        dirs.sort()
        for f in sorted(files):
            if f == '.bower.json':
                continue
            p = os.path.join(root, f)
            hash_obj.update(p[len(path)+1:].encode("utf-8"))
            hash_obj.update(open(p, "rb").read())

    return hash_obj

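A hedged usage sketch (the component directory is hypothetical):

import hashlib

import bowerutil

# Stable across re-downloads of the same component version, because the
# volatile .bower.json is skipped and the walk order is sorted.
digest = bowerutil.hash_bower_component(
    hashlib.sha1(), 'bower_components/moment').hexdigest()
print(digest)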

@ -30,99 +30,99 @@ CACHE_DIR = os.path.expanduser(os.path.join(

def bower_cmd(bower, *args):
    cmd = bower.split(' ')
    cmd.extend(args)
    return cmd

def bower_info(bower, name, package, version):
    cmd = bower_cmd(bower, '-l=error', '-j',
                    'info', '%s#%s' % (package, version))
    try:
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except:
        sys.stderr.write("error executing: %s\n" % ' '.join(cmd))
        raise
    out, err = p.communicate()
    if p.returncode:
        sys.stderr.write(err)
        raise OSError('Command failed: %s' % ' '.join(cmd))

    try:
        info = json.loads(out)
    except ValueError:
        raise ValueError('invalid JSON from %s:\n%s' % (" ".join(cmd), out))
    info_name = info.get('name')
    if info_name != name:
        raise ValueError('expected package name %s, got: %s' % (name, info_name))
    return info

def ignore_deps(info):
    # Tell bower to ignore dependencies so we just download this component.
    # This is just an optimization, since we only pick out the component we
    # need, but it's important when downloading sizable dependency trees.
    #
    # As of 1.6.5 I don't think ignoredDependencies can be specified on the
    # command line with --config, so we have to create .bowerrc.
    deps = info.get('dependencies')
    if deps:
        with open(os.path.join('.bowerrc'), 'w') as f:
            json.dump({'ignoredDependencies': list(deps.keys())}, f)

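For a component that declares, say, a polymer dependency, the generated .bowerrc would contain just:

{"ignoredDependencies": ["polymer"]}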
def cache_entry(name, package, version, sha1):
    if not sha1:
        sha1 = hashlib.sha1('%s#%s' % (package, version)).hexdigest()
    return os.path.join(CACHE_DIR, '%s-%s.zip-%s' % (name, version, sha1))

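So a fetch of a hypothetical component page 1.7.2 with a pinned sha1 lands in a cache file of the form:

<CACHE_DIR>/page-1.7.2.zip-<sha1>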
def main(args):
    opts = optparse.OptionParser()
    opts.add_option('-n', help='short name of component')
    opts.add_option('-b', help='bower command')
    opts.add_option('-p', help='full package name of component')
    opts.add_option('-v', help='version number')
    opts.add_option('-s', help='expected content sha1')
    opts.add_option('-o', help='output file location')
    opts, args_ = opts.parse_args(args)

    assert opts.p
    assert opts.v
    assert opts.n

    cwd = os.getcwd()
    outzip = os.path.join(cwd, opts.o)
    cached = cache_entry(opts.n, opts.p, opts.v, opts.s)

    if not os.path.exists(cached):
        info = bower_info(opts.b, opts.n, opts.p, opts.v)
        ignore_deps(info)
        subprocess.check_call(
            bower_cmd(opts.b, '--quiet', 'install', '%s#%s' % (opts.p, opts.v)))
        bc = os.path.join(cwd, 'bower_components')
        subprocess.check_call(
            ['zip', '-q', '--exclude', '.bower.json', '-r', cached, opts.n],
            cwd=bc)

        if opts.s:
            path = os.path.join(bc, opts.n)
            sha1 = bowerutil.hash_bower_component(hashlib.sha1(), path).hexdigest()
            if opts.s != sha1:
                print((
                    '%s#%s:\n'
                    'expected %s\n'
                    'received %s\n') % (opts.p, opts.v, opts.s, sha1), file=sys.stderr)
                try:
                    os.remove(cached)
                except OSError as err:
                    if os.path.exists(cached):
                        print('error removing %s: %s' % (cached, err), file=sys.stderr)
                return 1

    shutil.copyfile(cached, outzip)
    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
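An illustrative invocation, assuming the script is saved as download_bower.py (all values made up, the expected sha1 left as a placeholder):

python download_bower.py -b bower -n page -p page -v 1.7.2 \
    -s <expected-sha1> -o page.zip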

@ -32,49 +32,49 @@ import tempfile

def is_bundled(tar):
    # No entries for directories, so scan for a matching prefix.
    for entry in tar.getmembers():
        if entry.name.startswith('package/node_modules/'):
            return True
    return False


def bundle_dependencies():
    with open('package.json') as f:
        package = json.load(f)
    package['bundledDependencies'] = list(package['dependencies'].keys())
    with open('package.json', 'w') as f:
        json.dump(package, f)

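The effect of bundle_dependencies on package.json, with a made-up dependency:

before: {"name": "example", "dependencies": {"minimist": "^1.2.0"}}
after:  {"name": "example", "dependencies": {"minimist": "^1.2.0"},
         "bundledDependencies": ["minimist"]}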
def main(args):
    if len(args) != 2:
        print('Usage: %s <package> <version>' % sys.argv[0], file=sys.stderr)
        return 1

    name, version = args
    filename = '%s-%s.tgz' % (name, version)
    url = 'http://registry.npmjs.org/%s/-/%s' % (name, filename)

    tmpdir = tempfile.mkdtemp()
    tgz = os.path.join(tmpdir, filename)
    atexit.register(lambda: shutil.rmtree(tmpdir))

    subprocess.check_call(['curl', '--proxy-anyauth', '-ksfo', tgz, url])
    with tarfile.open(tgz, 'r:gz') as tar:
        if is_bundled(tar):
            print('%s already has bundled node_modules' % filename)
            return 1
        tar.extractall(path=tmpdir)

    oldpwd = os.getcwd()
    os.chdir(os.path.join(tmpdir, 'package'))
    bundle_dependencies()
    subprocess.check_call(['npm', 'install'])
    subprocess.check_call(['npm', 'pack'])
    shutil.copy(filename, os.path.join(oldpwd, filename))
    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))

@ -27,65 +27,67 @@ import tempfile

def extract(path, outdir, bin):
    if os.path.exists(os.path.join(outdir, bin)):
        return  # Another process finished extracting, ignore.

    # Use a temp directory adjacent to outdir so shutil.move can use the same
    # device atomically.
    tmpdir = tempfile.mkdtemp(dir=os.path.dirname(outdir))

    def cleanup():
        try:
            shutil.rmtree(tmpdir)
        except OSError:
            pass  # Too late now
    atexit.register(cleanup)

    def extract_one(mem):
        dest = os.path.join(outdir, mem.name)
        tar.extract(mem, path=tmpdir)
        try:
            os.makedirs(os.path.dirname(dest))
        except OSError:
            pass  # Either exists, or will fail on the next line.
        shutil.move(os.path.join(tmpdir, mem.name), dest)

    with tarfile.open(path, 'r:gz') as tar:
        for mem in tar.getmembers():
            if mem.name != bin:
                extract_one(mem)
        # Extract bin last so other processes only short-circuit when
        # extraction is finished.
        extract_one(tar.getmember(bin))

def main(args):
    path = args[0]
    suffix = '.npm_binary.tgz'
    tgz = os.path.basename(path)

    parts = tgz[:-len(suffix)].split('@')

    if not tgz.endswith(suffix) or len(parts) != 2:
        print('usage: %s <path/to/npm_binary>' % sys.argv[0], file=sys.stderr)
        return 1

    name, _ = parts

    # Avoid importing from gerrit because we don't want to depend on the right CWD.
    sha1 = hashlib.sha1(open(path, 'rb').read()).hexdigest()
    outdir = '%s-%s' % (path[:-len(suffix)], sha1)
    rel_bin = os.path.join('package', 'bin', name)
    bin = os.path.join(outdir, rel_bin)
    if not os.path.isfile(bin):
        extract(path, outdir, rel_bin)

    nodejs = spawn.find_executable('nodejs')
    if nodejs:
        # Debian installs Node.js as 'nodejs', due to a conflict with another
        # package.
        subprocess.check_call([nodejs, bin] + args[1:])
    else:
        subprocess.check_call([bin] + args[1:])


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
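Given the <name>@<version>.npm_binary.tgz naming convention that main() parses, an illustrative invocation is (the tarball path is made up):

python run_npm_binary.py path/to/bower@1.8.0.npm_binary.tgz install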

@ -29,56 +29,56 @@ opts.add_option('-s', action='append', help='triplet of artifactId:type:path')

args, ctx = opts.parse_args()
if not args.v:
    print('version is empty', file=stderr)
    exit(1)

root = path.abspath(__file__)
while not path.exists(path.join(root, 'WORKSPACE')):
    root = path.dirname(root)

if 'install' == args.a:
    cmd = [
        'mvn',
        'install:install-file',
        '-Dversion=%s' % args.v,
    ]
elif 'deploy' == args.a:
    cmd = [
        'mvn',
        'gpg:sign-and-deploy-file',
        '-DrepositoryId=%s' % args.repository,
        '-Durl=%s' % args.url,
    ]
else:
    print("unknown action -a %s" % args.a, file=stderr)
    exit(1)

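With -a install, -v 2.14 (an illustrative version), and a spec of gerrit-war:war:bazel-bin/gerrit.war, the assembled command comes out roughly as:

mvn install:install-file -Dversion=2.14 \
    -DpomFile=<root>/tools/maven/gerrit-war_pom.xml \
    -Dpackaging=war -Dfile=bazel-bin/gerrit.war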
for spec in args.s:
    artifact, packaging_type, src = spec.split(':')
    exe = cmd + [
        '-DpomFile=%s' % path.join(root, 'tools', 'maven', '%s_pom.xml' % artifact),
        '-Dpackaging=%s' % packaging_type,
        '-Dfile=%s' % src,
    ]
    try:
        if environ.get('VERBOSE'):
            print(' '.join(exe), file=stderr)
        check_output(exe)
    except Exception as e:
        print('%s command failed: %s\n%s' % (args.a, ' '.join(exe), e),
              file=stderr)
        if environ.get('VERBOSE') and isinstance(e, CalledProcessError):
            print('Command output\n%s' % e.output, file=stderr)
        exit(1)


out = stderr
if args.o:
    out = open(args.o, 'w')

with out as fd:
    if args.repository:
        print('Repository: %s' % args.repository, file=fd)
    if args.url:
        print('URL: %s' % args.url, file=fd)
    print('Version: %s' % args.v, file=fd)


@ -20,8 +20,8 @@ import zipfile

import io

if len(sys.argv) < 3:
    print('usage: %s <out.zip> <in.zip>...' % sys.argv[0], file=sys.stderr)
    exit(1)

outfile = sys.argv[1]
infiles = sys.argv[2:]

@ -29,22 +29,22 @@ seen = set()

SERVICES = 'META-INF/services/'

try:
    with zipfile.ZipFile(outfile, 'w') as outzip:
        services = collections.defaultdict(lambda: '')
        for infile in infiles:
            with zipfile.ZipFile(infile) as inzip:
                for info in inzip.infolist():
                    n = info.filename
                    if n in seen:
                        continue
                    elif n.startswith(SERVICES):
                        # Concatenate all provider configuration files.
                        services[n] += inzip.read(n).decode("UTF-8")
                        continue
                    outzip.writestr(info, inzip.read(n))
                    seen.add(n)

        for n, v in list(services.items()):
            outzip.writestr(n, v)
except Exception as err:
    exit('Failed to merge jars: %s' % err)
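The SERVICES branch is what keeps java.util.ServiceLoader working after the merge: provider configuration files with the same name are concatenated rather than dropped. For a hypothetical provider interface com.example.Foo:

in1.jar   META-INF/services/com.example.Foo:  com.example.FooImplA
in2.jar   META-INF/services/com.example.Foo:  com.example.FooImplB
out.zip   META-INF/services/com.example.Foo:  com.example.FooImplA
                                              com.example.FooImplB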

@ -101,9 +101,9 @@ def _main():

summary = summary + "."
|
summary = summary + "."
|
||||||
|
|
||||||
data = {
|
data = {
|
||||||
"version": Version(options.version),
|
"version": Version(options.version),
|
||||||
"previous": options.previous,
|
"previous": options.previous,
|
||||||
"summary": summary
|
"summary": summary
|
||||||
}
|
}
|
||||||
|
|
||||||
war = os.path.join(
|
war = os.path.join(
|
||||||
|

@ -15,57 +15,57 @@

from os import path

REPO_ROOTS = {
    'GERRIT': 'http://gerrit-maven.storage.googleapis.com',
    'GERRIT_API': 'https://gerrit-api.commondatastorage.googleapis.com/release',
    'MAVEN_CENTRAL': 'http://repo1.maven.org/maven2',
    'MAVEN_LOCAL': 'file://' + path.expanduser('~/.m2/repository'),
    'MAVEN_SNAPSHOT': 'https://oss.sonatype.org/content/repositories/snapshots',
}

def resolve_url(url, redirects):
    """Resolve the URL of a Maven artifact.

    The URL is passed as prefix:path. The prefix identifies a known or
    custom repository and can be rewritten via the redirects dict passed
    as the second argument.

    A special case is supported: when the prefix appears neither in
    REPO_ROOTS nor in redirects, the url is returned as is. This enables
    plugins to pass a custom maven_repository URL directly to maven_jar().

    Returns a resolved path for the Maven artifact.
    """
    s = url.find(':')
    if s < 0:
        return url
    scheme, rest = url[:s], url[s+1:]
    if scheme in redirects:
        root = redirects[scheme]
    elif scheme in REPO_ROOTS:
        root = REPO_ROOTS[scheme]
    else:
        return url
    root = root.rstrip('/')
    rest = rest.lstrip('/')
    return '/'.join([root, rest])

def hash_file(hash_obj, path):
    """Hash the contents of a file.

    Args:
      hash_obj: an open hash object, e.g. hashlib.sha1().
      path: path to the file to hash.

    Returns:
      The passed-in hash_obj.
    """
    with open(path, 'rb') as f:
        while True:
            b = f.read(8192)
            if not b:
                break
            hash_obj.update(b)
    return hash_obj

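Usage mirrors hash_bower_component; a one-line sketch with a hypothetical file path:

import hashlib

sha1 = hash_file(hashlib.sha1(), 'bazel-bin/gerrit.war').hexdigest()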

@ -16,28 +16,30 @@

import unittest
from util import resolve_url


class TestResolveUrl(unittest.TestCase):
    """ run to test:
    python -m unittest -v util_test
    """

    def testKnown(self):
        url = resolve_url('GERRIT:foo.jar', {})
        self.assertEqual(url, 'http://gerrit-maven.storage.googleapis.com/foo.jar')

    def testKnownRedirect(self):
        url = resolve_url('MAVEN_CENTRAL:foo.jar',
                          {'MAVEN_CENTRAL': 'http://my.company.mirror/maven2'})
        self.assertEqual(url, 'http://my.company.mirror/maven2/foo.jar')

    def testCustom(self):
        url = resolve_url('http://maven.example.com/release/foo.jar', {})
        self.assertEqual(url, 'http://maven.example.com/release/foo.jar')

    def testCustomRedirect(self):
        url = resolve_url('MAVEN_EXAMPLE:foo.jar',
                          {'MAVEN_EXAMPLE': 'http://maven.example.com/release'})
        self.assertEqual(url, 'http://maven.example.com/release/foo.jar')


if __name__ == '__main__':
    unittest.main()

@ -23,24 +23,24 @@ parser = OptionParser()

opts, args = parser.parse_args()

if not len(args):
    parser.error('not enough arguments')
elif len(args) > 1:
    parser.error('too many arguments')

DEST_PATTERN = r'\g<1>%s\g<3>' % args[0]


def replace_in_file(filename, src_pattern):
    try:
        f = open(filename, "r")
        s = f.read()
        f.close()
        s = re.sub(src_pattern, DEST_PATTERN, s)
        f = open(filename, "w")
        f.write(s)
        f.close()
    except IOError as err:
        print('error updating %s: %s' % (filename, err), file=sys.stderr)


src_pattern = re.compile(r'^(\s*<version>)([-.\w]+)(</version>\s*)$',

@ -48,8 +48,8 @@ src_pattern = re.compile(r'^(\s*<version>)([-.\w]+)(</version>\s*)$',

for project in ['gerrit-acceptance-framework', 'gerrit-extension-api',
                'gerrit-plugin-api', 'gerrit-plugin-gwtui',
                'gerrit-war']:
    pom = os.path.join('tools', 'maven', '%s_pom.xml' % project)
    replace_in_file(pom, src_pattern)

src_pattern = re.compile(r'^(GERRIT_VERSION = ")([-.\w]+)(")$', re.MULTILINE)
replace_in_file('version.bzl', src_pattern)
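With args[0] = '2.14.6' (a made-up version), the two substitutions rewrite matched lines in place while the captured surroundings survive:

<version>2.14.5</version>    ->  <version>2.14.6</version>
GERRIT_VERSION = "2.14.5"    ->  GERRIT_VERSION = "2.14.6"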