Update Czech Sahana translation + helper scripts, closes #411
This commit is contained in:
parent
5107631271
commit
21f695d7ba
0
extra/helpers/frontlinesms-lang-js.py
Normal file → Executable file
0
extra/helpers/frontlinesms-lang-js.py
Normal file → Executable file
95
extra/helpers/sahana-lang-compare.py
Executable file
95
extra/helpers/sahana-lang-compare.py
Executable file
@ -0,0 +1,95 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import csv
|
||||
import io
|
||||
import os
|
||||
from ast import literal_eval
|
||||
from pprint import pprint
|
||||
|
||||
# GIS CSV can have extra large fields; raise the csv module's per-field
# limit to the maximum 32-bit value so reading them does not raise an Error
csv.field_size_limit(2147483647)

# Strings that don't exist verbatim in the code (web2py validator messages
# built at runtime), so the code scan below can never match them; they are
# carried over from the old translation file unconditionally
extras = (
    'Enter a number between %(min)g and %(max)g',
    'Enter a number greater than or equal to %(min)g',
    'Enter a number less than or equal to %(max)g',
    'Enter an integer between %(min)g and %(max)g',
    'Enter an integer greater than or equal to %(min)g',
    'Enter an integer less than or equal to %(max)g',
)
|
||||
|
||||
def get_file_contents(filename):
    """Return the decoded text of *filename*.

    The file is read as raw bytes and decoded as UTF-8 when possible,
    falling back to Latin-1 (which accepts any byte sequence) otherwise.
    """
    with open(filename, 'rb') as handle:
        raw = handle.read()
    for encoding in ('utf-8', 'latin-1'):
        try:
            return raw.decode(encoding)
        except UnicodeDecodeError:
            continue
|
||||
|
||||
def get_csv_contents(filename):
    """Return a flat list of every field found in the CSV file *filename*."""
    buffer = io.StringIO(get_file_contents(filename))
    return [field for record in csv.reader(buffer) for field in record]
|
||||
|
||||
def main(args):
    """Merge still-used translations from the old Sahana Eden instance into
    a freshly converted language file.

    Strings present in the old translation file but missing from
    ``args.langfile`` are kept only if they still appear somewhere in the
    web2py/Eden source tree (quoted in .py/.html/.js files, or as a whole
    CSV field).  The merged result is written to the current directory
    under the same basename as ``args.langfile``.
    """
    basename = os.path.basename(args.langfile)
    # Load existing translations from the current (old) Sahana Eden instance;
    # web2py language files are plain Python dict literals
    with open(os.path.join(args.web2pydir, 'applications/eden/languages', basename)) as f:
        old_translations = literal_eval(f.read())
    # Load translations produced by sahana-lang-convert.py
    with open(args.langfile) as f:
        translations = literal_eval(f.read())

    # Old strings absent from the new file: candidates to carry over
    missing_translations = {key: value for key, value in old_translations.items() if key not in translations}

    for root, dirs, files in os.walk(args.web2pydir):
        # Iterate over all web2py subdirectories except "languages" which
        # already contain translations (pruned in-place so walk skips them)
        if 'languages' in dirs:
            dirs.remove('languages')
        for file in files:
            extension = os.path.splitext(file)[1].lower()
            filename = os.path.join(root, file)

            if extension in ('.py', '.html', '.js'):
                try:
                    file_contents = get_file_contents(filename)
                except UnicodeDecodeError:
                    # Undecodable file — cannot contain matchable strings
                    continue
                # Iterate over a copy so entries can be deleted while looping
                for key, value in missing_translations.copy().items():
                    # Naively search for quoted strings in .py .html and .js files
                    if f"'{key}'" in file_contents or f'"{key}"' in file_contents:
                        translations[key] = value
                        del missing_translations[key]

            elif extension == '.csv':
                try:
                    # Build a set once per file: O(1) membership tests below
                    # instead of an O(n) list scan per missing key
                    csv_contents = set(get_csv_contents(filename))
                except UnicodeDecodeError:
                    continue
                for key, value in missing_translations.copy().items():
                    # Naively search for full strings in csv fields
                    if key in csv_contents:
                        translations[key] = value
                        del missing_translations[key]

    for key in extras:
        # Add the extra translations which are never matched verbatim
        # (assumes every `extras` key exists in the old file — TODO confirm)
        if key not in translations:
            translations[key] = old_translations[key]

    with open(basename, 'w') as langfile:
        # Write the updated translation file in web2py's expected format
        print('# -*- coding: utf-8 -*-', file=langfile)
        pprint(translations, langfile, 0, 8192)
|
||||
|
||||
if __name__ == '__main__':
    # Command-line entry point: compare/merge a converted translation file
    # against the translations shipped with an existing web2py tree
    parser = argparse.ArgumentParser(description='Spotter Cluster Sahana Eden translation comparator')
    parser.add_argument('langfile', help='New translation file (with possibly missing strings).')
    parser.add_argument('web2pydir', help='Path to Web2py root directory.')
    main(parser.parse_args())
|
@ -1,30 +1,35 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
from pprint import pprint
|
||||
import argparse
|
||||
import csv
|
||||
import os
|
||||
import xlrd
|
||||
from pprint import pprint
|
||||
|
||||
def main(args):
    """Convert a CSV or XLS(X) translation export into a web2py language file.

    The result is written as ``<basename>.py`` (the input path with its
    extension replaced), formatted as a web2py dict literal.

    NOTE(review): the XLS(X) branch needs the third-party ``xlrd`` module,
    imported at the top of this script.
    """
    translations = {}
    basename, extension = os.path.splitext(args.inputfile)
    extension = extension.lower()

    if extension == '.csv':
        # Read CSV file, assume the same structure as given by the export:
        # the source string in column 1 and the translation in column 2
        with open(args.inputfile, 'r') as csvfile:
            reader = csv.reader(csvfile)
            for line in reader:
                translations[line[1]] = line[2]
    elif extension in ('.xls', '.xlsx'):
        # Read the XLS(X) file, assume type of columns from their count:
        # a non-empty third header cell means an extra leading column exists
        sheet = xlrd.open_workbook(args.inputfile).sheet_by_index(0)
        source_col, target_col = (1, 2) if sheet.row(0)[2] else (0, 1)
        for i in range(1, sheet.nrows):
            row = sheet.row(i)
            translations[row[source_col].value] = row[target_col].value
    else:
        print('Unknown input file extension')
        return

    with open('{}.py'.format(basename), 'w') as langfile:
        # Write the translation file with the encoding header web2py expects
        print('# -*- coding: utf-8 -*-', file=langfile)
        pprint(translations, langfile, 0, 8192)
|
@ -14,7 +14,7 @@ RUN EOF
|
||||
git -C /srv/web2py submodule update
|
||||
|
||||
# Symlink WSGI handler
|
||||
ln -s /srv/web2py/handlers/wsgihandler.py /srv/web2py/wsgihandler.py
|
||||
ln -s handlers/wsgihandler.py /srv/web2py/wsgihandler.py
|
||||
|
||||
# Install Sahana
|
||||
git clone --depth 1 https://github.com/sahana/eden.git /srv/web2py/applications/eden
|
||||
|
File diff suppressed because it is too large
Load Diff
Loading…
Reference in New Issue
Block a user