pyarq-presupuestos: comparison of Generic/fiebdc.py @ 26:16f91684686b (default, tip)
Upgrade to Python 3. Keep Python 2/3 compatibility.
author: Miguel Ángel Bárcena Rodríguez <miguelangel@obraencurso.es>
date: Tue, 18 Jun 2019 17:50:23 +0200
parents: 189f8274aecd
children: (none)
25:189f8274aecd | 26:16f91684686b |
20 ## along with this program. If not, see <http://www.gnu.org/licenses/>. | 20 ## along with this program. If not, see <http://www.gnu.org/licenses/>. |
21 | 21 |
22 # specifications in http://www.fiebdc.org | 22 # specifications in http://www.fiebdc.org |
23 | 23 |
24 # Modules | 24 # Modules |
25 | |
26 # python 2/3 compatibility | |
27 from __future__ import absolute_import, division, print_function, unicode_literals | |
28 from builtins import str as text | |
29 from six import text_type | |
30 from io import open | |
31 | |
25 import time | 32 import time |
26 import re | 33 import re |
27 import calendar | 34 import calendar |
28 import os.path | 35 import os.path |
29 import unicodedata | 36 import unicodedata |
30 import hashlib | 37 import hashlib |
31 # pyArq-Presupuestos modules | 38 # pyArq-Presupuestos modules |
32 import base | |
33 from Generic import utils | 39 from Generic import utils |
34 from Generic import globalVars | 40 from Generic import globalVars |
35 | 41 |
36 class Read(object): | 42 class Read(object): |
37 """fiebdc.Read: | 43 """fiebdc.Read: |
91 self.__character_sets_dict = {"ANSI" : "cp1252", | 97 self.__character_sets_dict = {"ANSI" : "cp1252", |
92 "850" : "850", | 98 "850" : "850", |
93 "437" : "cp437"} | 99 "437" : "cp437"} |
94 self.__file_format = "FIEBDC-3/2007" | 100 self.__file_format = "FIEBDC-3/2007" |
95 self.__generator = globalVars.name + " " + globalVars.version | 101 self.__generator = globalVars.name + " " + globalVars.version |
96 self.__character_set = "850" | 102 self.__character_set = "850" # set default character set to cp1252? |
97 self.__pattern = { | 103 self.__pattern = { |
98 "control_tilde" : re.compile(u"((\r\n)| |\t)+~"), | 104 "control_tilde" : re.compile("((\r\n)| |\t)+~"), |
99 "control_vbar" : re.compile(u"((\r\n)| |\t)+\|"), | 105 "control_vbar" : re.compile("((\r\n)| |\t)+\|"), |
100 "control_backslash" : re.compile(ur"((\r\n)| |\t)+\\"), | 106 "control_backslash" : re.compile(r"((\r\n)| |\t)+\\"), |
101 "valid_code" : re.compile(u"[^A-Za-z0-9ñÑ.$#%&_]"), | 107 "valid_code" : re.compile("[^A-Za-z0-9ñÑ.$#%&_]"), |
102 "special_char": re.compile(u"[#%&]"), | 108 "special_char": re.compile("[#%&]"), |
103 "no_float": re.compile(u"[^\-0-9.]"), | 109 "no_float": re.compile("[^\-0-9.]"), |
104 "formula" : re.compile(u".*[^0123456789\.()\+\-\*/\^abcdp ].*"), | 110 "formula" : re.compile(".*[^0123456789\.()\+\-\*/\^abcdp ].*"), |
105 "comment": re.compile(u"#.*\r\n"), | 111 "comment": re.compile("#.*\r\n"), |
106 "empty_line": re.compile(ur"(\r\n) *\r\n"), | 112 "empty_line": re.compile(r"(\r\n) *\r\n"), |
107 "space_before_backslash" : re.compile(ur"( )+\\"), | 113 "space_before_backslash" : re.compile(r"( )+\\"), |
108 "space_after_backslash" : re.compile(ur"\\( )+"), | 114 "space_after_backslash" : re.compile(r"\\( )+"), |
109 "start_noend_backslash" : re.compile(u"(\r\n\\\.*[^\\\])\r\n"), | 115 "start_noend_backslash" : re.compile("(\r\n\\\.*[^\\\])\r\n"), |
110 "end_oper": re.compile(u"(\+|-|\*|/|/^|@|&|<|>|<=|>=|=|!) *\r\n"), | 116 "end_oper": re.compile("(\+|-|\*|/|/^|@|&|<|>|<=|>=|=|!) *\r\n"), |
111 "matricial_var" : re.compile(u"(\r\n *[%|\$][A-ZÑ].*=.*,) *\r\n"), | 117 "matricial_var" : re.compile("(\r\n *[%|\$][A-ZÑ].*=.*,) *\r\n"), |
112 "descomposition" : re.compile(u"^([^:]+):(.*)$"), | 118 "descomposition" : re.compile("^([^:]+):(.*)$"), |
113 "var" : re.compile(u"^([$%][A-ZÑ][()0-9, ]*)=(.*)$"), | 119 "var" : re.compile("^([$%][A-ZÑ][()0-9, ]*)=(.*)$"), |
114 "after_first_tilde" : re.compile(u"^[^~]*~"), | 120 "after_first_tilde" : re.compile("^[^~]*~"), |
115 "end_control" : re.compile(u"((\r\n)| |\t)+$"), | 121 "end_control" : re.compile("((\r\n)| |\t)+$"), |
116 } | 122 } |
117 self.__statistics = Statistics() | 123 self.__statistics = Statistics() |
118 | 124 |
119 def cancel(self): | 125 def cancel(self): |
120 """def cancel() | 126 """def cancel() |
131 '~', '|' erased. | 137 '~', '|' erased. |
132 The \ before a separator is not deleted because it affects the reading | 138 The \ before a separator is not deleted because it affects the reading |
133 of the ~P record. | 139 of the ~P record. |
134 """ | 140 """ |
135 # "control_tilde" : "((\r\n)| |\t)+~" | 141 # "control_tilde" : "((\r\n)| |\t)+~" |
136 string = self.__pattern["control_tilde"].sub(u"~",string) | 142 string = self.__pattern["control_tilde"].sub("~",string) |
137 # "control_vbar" : "((\r\n)| |\t)+\|" | 143 # "control_vbar" : "((\r\n)| |\t)+\|" |
138 string = self.__pattern["control_vbar"].sub(u"|",string) | 144 string = self.__pattern["control_vbar"].sub("|",string) |
139 # "control_backslash" : r"((\r\n)| |\t)+\\" | 145 # "control_backslash" : r"((\r\n)| |\t)+\\" |
140 #string = self.__pattern["control_backslash"].sub(r"\\",string) | 146 #string = self.__pattern["control_backslash"].sub(r"\\",string) |
141 return string | 147 return string |
142 | 148 |
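To make the two substitutions above concrete, here is a minimal, self-contained sketch of how the "control_tilde" and "control_vbar" patterns collapse whitespace in front of the ~ and | separators (the sample record text is invented; the patterns are copied from the __pattern dictionary shown earlier):

    import re

    # Patterns as defined in __pattern (Python 3 form, no u-prefix needed).
    control_tilde = re.compile("((\r\n)| |\t)+~")
    control_vbar = re.compile("((\r\n)| |\t)+\\|")

    raw = "~V|owner \r\n |FIEBDC-3/2007 \r\n~C|code#|"
    cleaned = control_vbar.sub("|", control_tilde.sub("~", raw))
    print(cleaned)  # -> ~V|owner|FIEBDC-3/2007~C|code#|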
143 def validateCode(self, code): | 149 def validateCode(self, code): |
144 """validateCode(code) | 150 """validateCode(code) |
145 | 151 |
146 Test if the code has invalid characters and try to erase them; | 152 Test if the code has invalid characters and try to erase them; |
147 if possible return a valid code, else return an empty string. | 153 if possible return a valid code, else return an empty string. |
148 """ | 154 """ |
149 if not isinstance(code, unicode): | 155 if not isinstance(code, text_type): |
150 _str = _("Invalid code, it must be a unicode string") | 156 _tuni = _("Invalid code, it must be a text string") |
151 print(_str.encode("utf-8") ) | 157 print(_tuni) |
152 return u"" | 158 return "" |
153 # Valid characters: A-Z a-z 0-9 ñ Ñ . $ # % & _ | 159 # Valid characters: A-Z a-z 0-9 ñ Ñ . $ # % & _ |
154 # "valid_code" : "[^A-Za-z0-9ñÑ.$#%&_]" | 160 # "valid_code" : "[^A-Za-z0-9ñÑ.$#%&_]" |
155 _ucode = self.__pattern["valid_code"].sub(u"_", code) | 161 _ucode = self.__pattern["valid_code"].sub("_", code) |
156 if _ucode != code: | 162 if _ucode != code: |
157 try: | 163 try: |
158 _tstr = _("The code '$1' have invalid characters," \ | 164 _tuni = _("The code '$1' have invalid characters," \ |
159 " replaced by '$2'.") | 165 " replaced by '$2'.") |
160 print(utils.mapping(_tstr, (code.encode("utf8"), | 166 _uni = utils.mapping(_tuni, (code, _ucode)) |
161 _ucode.encode("utf8"))) ) | 167 print(_uni) |
162 except: | 168 except: |
163 _tstr = _("The code '$1' have invalid characters and can not" \ | 169 _tuni = _("The code '$1' have invalid characters and can not" \ |
164 " be encoded in utf8.") | 170 " be encoded in utf-8.") |
165 print(utils.mapping(_tstr, (code,)).encode("utf-8") ) | 171 _uni = utils.mapping(_tuni, (code,)) |
172 print(_uni) | |
166 | 173 |
167 if len(_ucode) == 0: | 174 if len(_ucode) == 0: |
168 _normalize_code = "" | 175 _normalize_code = "" |
169 for c in unicodedata.normalize('NFD', _ucode): | 176 for c in unicodedata.normalize('NFD', _ucode): |
170 if unicodedata.category(c) != 'Mn': | 177 if unicodedata.category(c) != 'Mn': |
171 _normalize_code += c | 178 _normalize_code += c |
172 #_normalize_code = ''.join((c for c in unicodedata.normalize( | 179 #_normalize_code = ''.join((c for c in unicodedata.normalize( |
173 # 'NFD', _ucode) if unicodedata.category(c) != 'Mn')) | 180 # 'NFD', _ucode) if unicodedata.category(c) != 'Mn')) |
174 # from http://www.leccionespracticas.com/uncategorized/ | 181 # from http://www.leccionespracticas.com/uncategorized/ |
175 # eliminar-tildes-con-python-solucionado/ | 182 # eliminar-tildes-con-python-solucionado/ |
176 _ucode = self.__pattern["valid_code"].sub(u"", _normalize_code) | 183 _ucode = self.__pattern["valid_code"].sub("", _normalize_code) |
177 if len(_ucode) == 0: | 184 if len(_ucode) == 0: |
178 _hash_code = hashlib.sha256() | 185 _hash_code = hashlib.sha256() |
179 _hash_code.update(code.encode('utf-8')) | 186 _hash_code.update(code.encode('utf-8')) |
180 _hexdigest_code = _hash_code.hexdigest() | 187 _hexdigest_code = _hash_code.hexdigest() |
181 _p_valid_code = self.__pattern["valid_code"] | 188 _p_valid_code = self.__pattern["valid_code"] |
182 _ucode = _p_valid_code.sub(u"", _hexdigest_code) | 189 _ucode = _p_valid_code.sub("", _hexdigest_code) |
183 code = _ucode | 190 code = _ucode |
184 if code == u"##": | 191 if code == "##": |
185 # root code is an empty code : set to ROOT | 192 # root code is an empty code : set to ROOT |
186 return u"ROOT" | 193 return "ROOT" |
187 # the last characters cannot be <#> or <##> | 194 # the last characters cannot be <#> or <##> |
188 # <##> -> root record in FIEFDC-3 | 195 # <##> -> root record in FIEFDC-3 |
189 # <#> -> chapter record in FIEFDC-3 | 196 # <#> -> chapter record in FIEFDC-3 |
190 if len(code) > 0: | 197 if len(code) > 0: |
191 while code[-1] == u"#": | 198 while code[-1] == "#": |
192 code = code[:-1] | 199 code = code[:-1] |
193 if len(code) == 0: | 200 if len(code) == 0: |
194 return code | 201 return code |
195 if len(code) > 20: | 202 if len(code) > 20: |
196 code = code[:20] | 203 code = code[:20] |
197 # only one character: # % or & | 204 # only one character: # % or & |
198 if sum([code.count(c) for c in u'#%&']) > 1: | 205 if sum([code.count(c) for c in u'#%&']) > 1: |
199 print(utils.mapping(_("The code '$1' contains special "\ | 206 _tuni = _("The code '$1' contains special characters repeated.") |
200 "characters repeated."), | 207 _uni = utils.mapping(_tuni, (code,)) |
201 (code.encode("utf8"),)).encode("utf-8") ) | 208 print(_uni) |
202 _i = min([code.find(c) for c in u'#%&']) | 209 _i = min([code.find(c) for c in u'#%&']) |
203 code = code[:_i+1] + \ | 210 code = code[:_i+1] + \ |
204 self.__pattern["special_char"].sub(u"", code[_i+1:]) | 211 self.__pattern["special_char"].sub("", code[_i+1:]) |
205 return code | 212 return code |
206 | 213 |
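A minimal sketch of the main sanitisation rules that validateCode applies (illustration only: the real method also de-duplicates the # % & special characters and falls back to a sha256-derived code when nothing valid remains):

    import re

    valid_code = re.compile("[^A-Za-z0-9ñÑ.$#%&_]")

    def sanitize(code):
        code = valid_code.sub("_", code)   # invalid characters become "_"
        if code == "##":
            return "ROOT"                  # empty root code
        while code and code[-1] == "#":    # strip trailing # / ## markers
            code = code[:-1]
        return code[:20]                   # maximum length is 20 characters

    print(sanitize("E28 EPP010#"))         # -> E28_EPP010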
207 def parseDate(self, date): | 214 def parseDate(self, date): |
208 """parseDate(date) | 215 """parseDate(date) |
209 | 216 |
215 len < 3 YY | 222 len < 3 YY |
216 Test date string and return a tuple (YYYY, MM, DD) | 223 Test date string and return a tuple (YYYY, MM, DD) |
217 or None if the date format is invalid | 224 or None if the date format is invalid |
218 """ | 225 """ |
219 # All characters must be numbers, len <= 8 and not empty string | 226 # All characters must be numbers, len <= 8 and not empty string |
220 if not date.isdigit() or len(date) > 8 or date == u"": | 227 if not date.isdigit() or len(date) > 8 or date == "": |
221 return None | 228 return None |
222 else: | 229 else: |
223 if len(date)%2 == 1: # uneven len: add a leading 0 | 230 if len(date)%2 == 1: # uneven len: add a leading 0 |
224 date = u"0" + date | 231 date = "0" + date |
225 if len(date) == 8: | 232 if len(date) == 8: |
226 _d = int(date[:2]) | 233 _d = int(date[:2]) |
227 _m = int(date[2:4]) | 234 _m = int(date[2:4]) |
228 _y = int(date[4:8]) | 235 _y = int(date[4:8]) |
229 elif len(date) <= 6: | 236 elif len(date) <= 6: |
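A minimal standalone sketch of the 8-digit DDMMYYYY branch shown above (the real parseDate also normalises the 6-, 4- and 2-digit forms listed in the docstring):

    def parse_date_ddmmyyyy(date):
        # Accept only an 8-digit, all-numeric string.
        if not date.isdigit() or len(date) != 8:
            return None
        return (int(date[4:8]), int(date[2:4]), int(date[:2]))  # (YYYY, MM, DD)

    print(parse_date_ddmmyyyy("18062019"))  # -> (2019, 6, 18)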
249 | 256 |
250 def parseRecord(self, record, interface): | 257 def parseRecord(self, record, interface): |
251 """parseRecord(record, interface) | 258 """parseRecord(record, interface) |
252 | 259 |
253 record: the record line read from the file with the format: | 260 record: the record line read from the file with the format: |
254 type|field|field|subfield\subfield|... | 261 type|field|field|subfield\\subfield|... |
255 [a] nothing or "a" | 262 [a] nothing or "a" |
256 {a} zero or more times "a" | 263 {a} zero or more times "a" |
257 <a> one or more times "a" | 264 <a> one or more times "a" |
258 Types: V C D Y M N T K L Q J G E X B F A | 265 Types: V C D Y M N T K L Q J G E X B F A |
259 V: Property and Version | 266 V: Property and Version |
260 1- [File_Owner] | 267 1- [File_Owner] |
261 2- Format_Version[\DDMMYYYY] | 268 2- Format_Version[\\DDMMYYYY] |
262 3- [Program_Generator] | 269 3- [Program_Generator] |
263 4- [Header]\{Title\} | 270 4- [Header]\\{Title\\} |
264 5- [Characters_set] | 271 5- [Characters_set] |
265 6- [Comment] | 272 6- [Comment] |
266 C: Record: | 273 C: Record: |
267 1- Code{\Code} | 274 1- Code{\\Code} |
268 2- [Unit] | 275 2- [Unit] |
269 3- [Summary] | 276 3- [Summary] |
270 4- {Price\} | 277 4- {Price\\} |
271 5- {Date\} | 278 5- {Date\\} |
272 6- [Type] | 279 6- [Type] |
273 D or Y: DECOMPOSITION or ADD DECOMPOSITION | 280 D or Y: DECOMPOSITION or ADD DECOMPOSITION |
274 1- Parent Code | 281 1- Parent Code |
275 2- <Child Code\ [Factor]\ [Yield]> | 282 2- <Child Code\\ [Factor]\\ [Yield]> |
276 M or N: MEASURE or ADD MEASURE | 283 M or N: MEASURE or ADD MEASURE |
277 1- [Parent Code\]Child Code | 284 1- [Parent Code\\]Child Code |
278 2- {Path\} | 285 2- {Path\} |
279 3- TOTAL MEASURE | 286 3- TOTAL MEASURE |
280 4- {Type\Comment\Unit\Length\Width\Height\} | 287 4- {Type\\Comment\\Unit\\Length\\Width\\Height\\} |
281 5- [Label] | 288 5- [Label] |
282 T: Text | 289 T: Text |
283 1- Code | 290 1- Code |
284 2- Description text | 291 2- Description text |
285 K: Coefficients | 292 K: Coefficients |
286 1- { DN \ DD \ DS \ DR \ DI \ DP \ DC \ DM \ DIVISA \ } | 293 1- { DN \\ DD \\ DS \\ DR \\ DI \\ DP \\ DC \\ DM \\ DIVISA \\ } |
287 2- CI \ GG \ BI \ BAJA \ IVA | 294 2- CI \\ GG \\ BI \\ BAJA \\ IVA |
288 3- { DRC \ DC \ DRO \ DFS \ DRS \ DFO \ DUO \ DI \ DES \ DN \ | 295 3- { DRC \\ DC \\ DRO \\ DFS \\ DRS \\ DFO \\ DUO \\ DI \\ DES \\ DN \\ |
289 DD \ DS \ DIVISA \ } | 296 DD \\ DS \\ DIVISA \\ } |
290 4- [ n ] | 297 4- [ n ] |
291 L: Sheet of Conditions 1 | 298 L: Sheet of Conditions 1 |
292 A) | 299 A) |
293 1- Empty | 300 1- Empty |
294 2- {Section Code\Section Title} | 301 2- {Section Code\\Section Title} |
295 B) | 302 B) |
296 1- Record Code | 303 1- Record Code |
297 2- {Section Code\Section Text} | 304 2- {Section Code\\Section Text} |
298 3- {Section Code\RTF file} | 305 3- {Section Code\\RTF file} |
299 4- {Section Code\HTM file} | 306 4- {Section Code\\HTM file} |
300 Q: Sheet of Conditions 2 | 307 Q: Sheet of Conditions 2 |
301 1- Record Code | 308 1- Record Code |
302 2- {Section Code\Paragraph key\{Field key;}\}| | 309 2- {Section Code\\Paragraph key\\{Field key;}\\}| |
303 J: Sheet of Conditions 3 | 310 J: Sheet of Conditions 3 |
304 1- Paragraph code | 311 1- Paragraph code |
305 2- [Paragraph text] | 312 2- [Paragraph text] |
306 3- [RTF file] | 313 3- [RTF file] |
307 4- [HTML file] | 314 4- [HTML file] |
308 G: Graphic info | 315 G: Graphic info |
309 1- <grafic_file.ext\> | 316 1- <grafic_file.ext\\> |
310 E: Company | 317 E: Company |
311 1- company Code | 318 1- company Code |
312 2- [ summary ] | 319 2- [ summary ] |
313 3- [ name ] | 320 3- [ name ] |
314 4- { [ type ] \ [ subname ] \ [ address ] \ [ postal_code ] | 321 4- { [ type ] \\ [ subname ] \\ [ address ] \\ [ postal_code ] |
315 \ [ town ] \ [ province ] \ [ country ] \ { phone; } | 322 \\ [ town ] \\ [ province ] \\ [ country ] \\ { phone; } |
316 \ { fax; } \ {contact_person; } \ } | 323 \\ { fax; } \\ {contact_person; } \\ } |
317 5- [ cif ] \ [ web ] \ [ email ] \ | 324 5- [ cif ] \\ [ web ] \\ [ email ] \\ |
318 X: Technical information | 325 X: Technical information |
319 A) | 326 A) |
320 1- Empty | 327 1- Empty |
321 2- < TI_Code \ TI_Description \ TI_Unit > | 328 2- < TI_Code \\ TI_Description \\ TI_Unit > |
322 B) | 329 B) |
323 1- Record_code | 330 1- Record_code |
324 2- < TI_Code \ TI_value > | 331 2- < TI_Code \\ TI_value > |
325 F: Attached File | 332 F: Attached File |
326 1- Record code | 333 1- Record code |
327 2- { Type \ { Filenames; } \ [Description] } | 334 2- { Type \\ { Filenames; } \\ [Description] } |
328 B: Change code | 335 B: Change code |
329 1- Record Code | 336 1- Record Code |
330 2- New code | 337 2- New code |
331 A: Labels | 338 A: Labels |
332 1- Record Code | 339 1- Record Code |
333 2- <Label\> | 340 2- <Label\\> |
334 interface: | 341 interface: |
335 """ | 342 """ |
336 # TODO: ~L ~J RTF and HTML files | 343 # TODO: ~L ~J RTF and HTML files |
337 # TODO: test ~Q ~J ~G | 344 # TODO: test ~Q ~J ~G |
338 # TODO: ~P. Parametric description record type. | 345 # TODO: ~P. Parametric description record type. |
339 # TODO: ~O. Commercial relationship record type. | 346 # TODO: ~O. Commercial relationship record type. |
340 # TODO: test records | 347 # TODO: test records |
341 _field_list = record.split(u"|") | 348 _field_list = record.split("|") |
342 self.__statistics.records = self.__statistics.records +1 | 349 self.__statistics.records = self.__statistics.records +1 |
343 _budget = self.__budget | 350 _budget = self.__budget |
344 if _field_list[0] == u"V": | 351 if _field_list[0] == "V": |
345 self.__statistics.V += 1 | 352 self.__statistics.V += 1 |
346 self._parseV(_field_list) | 353 self._parseV(_field_list) |
347 elif _field_list[0] == u"C": | 354 elif _field_list[0] == "C": |
348 self.__statistics.C += 1 | 355 self.__statistics.C += 1 |
349 self._parseC(_field_list, interface) | 356 self._parseC(_field_list, interface) |
350 elif _field_list[0] == u"D": | 357 elif _field_list[0] == "D": |
351 self.__statistics.D += 1 | 358 self.__statistics.D += 1 |
352 self._parseDY(_field_list, interface) | 359 self._parseDY(_field_list, interface) |
353 elif _field_list[0] == u"Y": | 360 elif _field_list[0] == "Y": |
354 self.__statistics.Y += 1 | 361 self.__statistics.Y += 1 |
355 self._parseDY(_field_list, interface) | 362 self._parseDY(_field_list, interface) |
356 elif _field_list[0] == u"M": | 363 elif _field_list[0] == "M": |
357 self.__statistics.M += 1 | 364 self.__statistics.M += 1 |
358 self._parseMN(_field_list) | 365 self._parseMN(_field_list) |
359 elif _field_list[0] == u"N": | 366 elif _field_list[0] == "N": |
360 self.__statistics.N += 1 | 367 self.__statistics.N += 1 |
361 self._parseMN(_field_list) | 368 self._parseMN(_field_list) |
362 elif _field_list[0] == u"T": | 369 elif _field_list[0] == "T": |
363 self.__statistics.T += 1 | 370 self.__statistics.T += 1 |
364 self._parseT(_field_list) | 371 self._parseT(_field_list) |
365 elif _field_list[0] == u"K": | 372 elif _field_list[0] == "K": |
366 self.__statistics.K += 1 | 373 self.__statistics.K += 1 |
367 self._parseK(_field_list) | 374 self._parseK(_field_list) |
368 elif _field_list[0] == u"W": | 375 elif _field_list[0] == "W": |
369 self.__statistics.W += 1 | 376 self.__statistics.W += 1 |
370 self._parseW(_field_list) | 377 self._parseW(_field_list) |
371 elif _field_list[0] == u"L": | 378 elif _field_list[0] == "L": |
372 self.__statistics.L += 1 | 379 self.__statistics.L += 1 |
373 self._parseL(_field_list) | 380 self._parseL(_field_list) |
374 elif _field_list[0] == u"Q": | 381 elif _field_list[0] == "Q": |
375 self.__statistics.Q += 1 | 382 self.__statistics.Q += 1 |
376 self._parseQ(_field_list) | 383 self._parseQ(_field_list) |
377 elif _field_list[0] == u"J": | 384 elif _field_list[0] == "J": |
378 self.__statistics.J += 1 | 385 self.__statistics.J += 1 |
379 self._parseJ(_field_list) | 386 self._parseJ(_field_list) |
380 elif _field_list[0] == u"G": | 387 elif _field_list[0] == "G": |
381 self.__statistics.G += 1 | 388 self.__statistics.G += 1 |
382 self._parseG(_field_list) | 389 self._parseG(_field_list) |
383 elif _field_list[0] == u"E": | 390 elif _field_list[0] == "E": |
384 self.__statistics.E += 1 | 391 self.__statistics.E += 1 |
385 self._parseE(_field_list) | 392 self._parseE(_field_list) |
386 elif _field_list[0] == "O": | 393 elif _field_list[0] == "O": |
387 self.__statistics.O += 1 | 394 self.__statistics.O += 1 |
388 elif _field_list[0] == u"P": | 395 elif _field_list[0] == "P": |
389 self.__statistics.P += 1 | 396 self.__statistics.P += 1 |
390 self._parseP(_field_list) | 397 self._parseP(_field_list) |
391 elif _field_list[0] == u"X": | 398 elif _field_list[0] == "X": |
392 self.__statistics.X += 1 | 399 self.__statistics.X += 1 |
393 self._parseX(_field_list) | 400 self._parseX(_field_list) |
394 elif _field_list[0] == u"B": | 401 elif _field_list[0] == "B": |
395 self.__statistics.B += 1 | 402 self.__statistics.B += 1 |
396 self._parseB(_field_list) | 403 self._parseB(_field_list) |
397 elif _field_list[0] == u"F": | 404 elif _field_list[0] == "F": |
398 self.__statistics.F += 1 | 405 self.__statistics.F += 1 |
399 self._parseF(_field_list) | 406 self._parseF(_field_list) |
400 elif _field_list[0] == u"A": | 407 elif _field_list[0] == "A": |
401 self.__statistics.A += 1 | 408 self.__statistics.A += 1 |
402 self._parseA(_field_list) | 409 self._parseA(_field_list) |
403 else: | 410 else: |
404 print(utils.mapping(_("FIEBDC. Unknow record: $1"), | 411 _tuni = _("FIEBDC. Unknow record: $1") |
405 (record[:100],)).encode("utf-8")) | 412 _uni = utils.mapping(_tuni, (record[:100],)) |
413 print(_uni) | |
406 self.__statistics.unknow += 1 | 414 self.__statistics.unknow += 1 |
407 | 415 |
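A minimal sketch of the dispatch that parseRecord performs: the record is split on "|" and the first field selects the handler. The sample record and the handler table are invented for illustration; the real method calls the _parse* methods of this class:

    record = "C|E28EPP010|u|Safety helmet|2.48|180619|0|"
    fields = record.split("|")

    handlers = {"V": "_parseV", "C": "_parseC", "D": "_parseDY", "M": "_parseMN"}
    print(fields[0], "->", handlers.get(fields[0], "unknown record"))
    # -> C -> _parseC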
408 def _parseV(self, field_list): | 416 def _parseV(self, field_list): |
409 """_parseV(field_list) | 417 """_parseV(field_list) |
410 | 418 |
419 7- [Data type] | 427 7- [Data type] |
420 8- [Number budget certificate] | 428 8- [Number budget certificate] |
421 9- [Date budget certificate] | 429 9- [Date budget certificate] |
422 """ | 430 """ |
423 if self.__statistics.records != 1: | 431 if self.__statistics.records != 1: |
424 print(utils.mapping(_("The 'V' record (Property and Version) "\ | 432 _tuni = _("The 'V' record (Property and Version) "\ |
425 "must be the first record in the file but it is the "\ | 433 "must be the first record in the file but it is the "\ |
426 "number: $1"), | 434 "number: $1") |
427 (str(self.__statistics.records),)).encode("utf-8") ) | 435 _uni = utils.mapping(_tuni, (text(self.__statistics.records),)) |
428 print(_("The default values were taken and this V record is "\ | 436 print(_uni) |
429 "ignored").encode("utf-8") ) | 437 _tuni = _("The default values were taken and this V record is "\ |
438 "ignored") | |
439 print(_tuni) | |
430 return | 440 return |
431 # _____number of fields_____ | 441 # _____number of fields_____ |
432 # Any INFORMATION after last field separator is ignored | 442 # Any INFORMATION after last field separator is ignored |
433 if len(field_list) > 10: | 443 if len(field_list) > 10: |
434 field_list = field_list[:10] | 444 field_list = field_list[:10] |
435 # If there are no sufficient fields, the fields are added | 445 # If there are no sufficient fields, the fields are added |
436 # with empty value:"" | 446 # with empty value:"" |
437 else: | 447 else: |
438 field_list = field_list + [u""]*(10-len(field_list)) | 448 field_list = field_list + [""]*(10-len(field_list)) |
439 # control characters are erased: end of line, tab, space | 449 # control characters are erased: end of line, tab, space |
440 # only leading and trailing whitespace in owner, generator, comment | 450 # only leading and trailing whitespace in owner, generator, comment |
441 # _____Fields_____ | 451 # _____Fields_____ |
442 _record_type = self.delete_control_space(field_list[0]) | 452 _record_type = self.delete_control_space(field_list[0]) |
443 _owner = field_list[1].strip() | 453 _owner = field_list[1].strip() |
446 _generator = field_list[3].strip() | 456 _generator = field_list[3].strip() |
447 _generator = self.delete_control(_generator) | 457 _generator = self.delete_control(_generator) |
448 _header_title = field_list[4].strip() | 458 _header_title = field_list[4].strip() |
449 _header_title = self.delete_control(_header_title) | 459 _header_title = self.delete_control(_header_title) |
450 _character_set = self.delete_control_space(field_list[5]) | 460 _character_set = self.delete_control_space(field_list[5]) |
451 _comment = field_list[6].strip(u"\t \n\r") | 461 _comment = field_list[6].strip("\t \n\r") |
452 _data_type = self.delete_control_space(field_list[7]) | 462 _data_type = self.delete_control_space(field_list[7]) |
453 _number_certificate = self.delete_control_space(field_list[8]) | 463 _number_certificate = self.delete_control_space(field_list[8]) |
454 __date_certificate = self.delete_control_space(field_list[9]) | 464 __date_certificate = self.delete_control_space(field_list[9]) |
455 # _____Owner_____ | 465 # _____Owner_____ |
456 self.__budget.setOwner(_owner) | 466 self.__budget.setOwner(_owner) |
457 # _____Version-Date_____ | 467 # _____Version-Date_____ |
458 _version_date = _version_date.split(u"\\") | 468 _version_date = _version_date.split("\\") |
459 _file_format = _version_date[0] | 469 _file_format = _version_date[0] |
460 if _file_format in self.__format_list: | 470 if _file_format in self.__format_list: |
461 self.__file_format = _file_format | 471 self.__file_format = _file_format |
462 print(utils.mapping(_("FIEBDC format: $1"), | 472 _tuni = _("FIEBDC format: $1") |
463 (_file_format,)).encode("utf-8") ) | 473 _uni = utils.mapping(_tuni, (_file_format,)) |
474 print(_uni) | |
464 | 475 |
465 if len(_version_date) > 1: | 476 if len(_version_date) > 1: |
466 _date = _version_date[1] | 477 _date = _version_date[1] |
467 if _date != u"": | 478 if _date != "": |
468 _parsed_date = self.parseDate(_date) | 479 _parsed_date = self.parseDate(_date) |
469 if _parsed_date is not None: | 480 if _parsed_date is not None: |
470 self.__budget.setDate(_parsed_date) | 481 self.__budget.setDate(_parsed_date) |
471 # _____Generator_____ | 482 # _____Generator_____ |
472 # ignored field | 483 # ignored field |
473 print(utils.mapping(_("FIEBDC file generated by $1"), | 484 _tuni = _("FIEBDC file generated by $1") |
474 (_generator,)).encode("utf-8") ) | 485 _uni = utils.mapping(_tuni, (_generator,)) |
486 print(_uni) | |
475 # _____Header_Title_____ | 487 # _____Header_Title_____ |
476 _header_title = _header_title.split(u"\\") | 488 _header_title = _header_title.split("\\") |
477 _header_title = [_title.strip() for _title in _header_title] | 489 _header_title = [_title.strip() for _title in _header_title] |
478 _header = _header_title.pop(0) | 490 _header = _header_title.pop(0) |
479 _header = [_item.encode("utf8") for _item in _header] | |
480 _title = [ ] | 491 _title = [ ] |
481 for _title_index in _header_title: | 492 for _title_index in _header_title: |
482 if _title_index != u"": | 493 if _title_index != "": |
483 _title.append(_title_index) | 494 _title.append(_title_index) |
484 _title = [_item.encode("utf8") for _item in _title] | 495 if _header != "": |
485 if _header != u"": | |
486 self.__budget.setTitleList([ _header, _title]) | 496 self.__budget.setTitleList([ _header, _title]) |
487 # _____Characters_set_____ | 497 # _____Characters_set_____ |
488 # field parsed in readFile method | 498 # field parsed in readFile method |
489 # _____Comment_____ | 499 # _____Comment_____ |
490 if _comment != u"": | 500 if _comment != "": |
491 self.__budget.setComment(_comment.encode("utf8")) | 501 self.__budget.setComment(_comment) |
492 # _____Data type_____ | 502 # _____Data type_____ |
493 # 1 -> Base data. | 503 # 1 -> Base data. |
494 # 2 -> Budget. | 504 # 2 -> Budget. |
495 # 3 -> Budget certificate. | 505 # 3 -> Budget certificate. |
496 # 4 -> Base date update. | 506 # 4 -> Base date update. |
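A minimal sketch of how the Format_Version\Date and Header\Title\... fields of a ~V record are split, as done a few lines above (sample values invented):

    version_date = "FIEBDC-3/2007\\18062019".split("\\")
    print(version_date)       # -> ['FIEBDC-3/2007', '18062019']

    header_title = "Budget\\Phase 1\\Phase 2\\".split("\\")
    header = header_title.pop(0)
    titles = [t for t in header_title if t != ""]
    print(header, titles)     # -> Budget ['Phase 1', 'Phase 2']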
545 # _____Fields_____ | 555 # _____Fields_____ |
546 _field0 = self.delete_control_space(field_list[0]) | 556 _field0 = self.delete_control_space(field_list[0]) |
547 _field1 = self.delete_control_space(field_list[1]) | 557 _field1 = self.delete_control_space(field_list[1]) |
548 _field2 = self.delete_control_space(field_list[2]) | 558 _field2 = self.delete_control_space(field_list[2]) |
549 # _____Field 1_____ | 559 # _____Field 1_____ |
550 if len(_field1) > 0 and _field1[-1] == u"\\": | 560 if len(_field1) > 0 and _field1[-1] == "\\": |
551 _field1 = _field1[:-1] | 561 _field1 = _field1[:-1] |
552 # if there is a \ character at the end it must be erased | 562 # if there is a \ character at the end it must be erased |
553 _percentages = _field1.split(u"\\") | 563 _percentages = _field1.split("\\") |
554 if len(_percentages) > 5: | 564 if len(_percentages) > 5: |
555 _percentages = _percentages[:5] | 565 _percentages = _percentages[:5] |
556 # If there are no sufficient subfields, the subfields are added | 566 # If there are no sufficient subfields, the subfields are added |
557 # with empty value:"" | 567 # with empty value:"" |
558 else: | 568 else: |
559 _percentages = _percentages + [u""]*(5-len(_percentages)) | 569 _percentages = _percentages + [""]*(5-len(_percentages)) |
560 _percentage_titles = [ "CI", "GG", "BI", "BAJA", "IVA" ] | 570 _percentage_titles = [ "CI", "GG", "BI", "BAJA", "IVA" ] |
561 _percentage_dict = {} | 571 _percentage_dict = {} |
562 for _percentage_index in range(len(_percentages)): | 572 for _percentage_index in range(len(_percentages)): |
563 try: | 573 try: |
564 _percentage = int(_percentages[_percentage_index]) | 574 _percentage = int(_percentages[_percentage_index]) |
571 # Default number of decimal places | 581 # Default number of decimal places |
572 # Number of titles in ~V record | 582 # Number of titles in ~V record |
573 _title_num = len(self.__budget.getTitleList()[1]) | 583 _title_num = len(self.__budget.getTitleList()[1]) |
574 if _title_num == 0: _title_num = 1 | 584 if _title_num == 0: _title_num = 1 |
575 # If field 2 is empty, field 0 is read | 585 # If field 2 is empty, field 0 is read |
576 if _field2 == u"": | 586 if _field2 == "": |
577 # _____Field 0_____ | 587 # _____Field 0_____ |
578 if _field0[-1] == u"\\": | 588 if _field0[-1] == "\\": |
579 _field0 = _field0[:-1] | 589 _field0 = _field0[:-1] |
580 # if there is a \ character at the end it must be erased | 590 # if there is a \ character at the end it must be erased |
581 _decimal_list = _field0.split(u"\\") | 591 _decimal_list = _field0.split("\\") |
582 _decimal_index = 0 | 592 _decimal_index = 0 |
583 if len(_decimal_list)%9 != 0: | 593 if len(_decimal_list)%9 != 0: |
584 # if it is not multiple of 9, empty subfield are added | 594 # if it is not multiple of 9, empty subfield are added |
585 _decimal_list = _decimal_list + [""]*(9 - \ | 595 _decimal_list = _decimal_list + [""]*(9 - \ |
586 len(_decimal_list)%9) | 596 len(_decimal_list)%9) |
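A minimal sketch of how the CI\GG\BI\BAJA\IVA percentage subfields of a ~K record are normalised to exactly five entries, as in the hunk above (sample field invented):

    field1 = "3\\13\\6\\\\21"
    percentages = field1.split("\\")
    if len(percentages) > 5:
        percentages = percentages[:5]
    else:
        percentages = percentages + [""] * (5 - len(percentages))
    print(dict(zip(["CI", "GG", "BI", "BAJA", "IVA"], percentages)))
    # -> {'CI': '3', 'GG': '13', 'BI': '6', 'BAJA': '', 'IVA': '21'}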
777 if len(field_list) > 7: | 787 if len(field_list) > 7: |
778 field_list = field_list[:7] | 788 field_list = field_list[:7] |
779 # If there are no sufficient fields, the fields are added | 789 # If there are no sufficient fields, the fields are added |
780 # with empty value:"" | 790 # with empty value:"" |
781 else: | 791 else: |
782 field_list = field_list + [u""]*(7-len(field_list)) | 792 field_list = field_list + [""]*(7-len(field_list)) |
783 # control characters are erased: end of line, tab, space | 793 # control characters are erased: end of line, tab, space |
784 # _____Fields_____ | 794 # _____Fields_____ |
785 _record_type = field_list[0] | 795 _record_type = field_list[0] |
786 _codes = self.delete_control_space(field_list[1]) | 796 _codes = self.delete_control_space(field_list[1]) |
787 _unit = self.delete_control_space(field_list[2]) | 797 _unit = self.delete_control_space(field_list[2]) |
788 _summary = self.delete_control(field_list[3]) | 798 _summary = self.delete_control(field_list[3]) |
789 _prices = self.delete_control_space(field_list[4]) | 799 _prices = self.delete_control_space(field_list[4]) |
790 _dates = self.delete_control_space(field_list[5]) | 800 _dates = self.delete_control_space(field_list[5]) |
791 _type = self.delete_control_space(field_list[6]) | 801 _type = self.delete_control_space(field_list[6]) |
792 # _____Code_____ | 802 # _____Code_____ |
793 _codes = _codes.split(u"\\") | 803 _codes = _codes.split("\\") |
794 if len(_codes) > 0: | 804 if len(_codes) > 0: |
795 # parse the hierarchy of the first code | 805 # parse the hierarchy of the first code |
796 # hierarchy: 0->root, 1->Chapter/subchapter, 2->other | 806 # hierarchy: 0->root, 1->Chapter/subchapter, 2->other |
797 if len(_codes[0]) > 1 and _codes[0][-2:] == u"##": | 807 if len(_codes[0]) > 1 and _codes[0][-2:] == "##": |
798 _hierarchy = 0 | 808 _hierarchy = 0 |
799 elif len(_codes[0]) > 0 and _codes[0][-1:] == u"#": | 809 elif len(_codes[0]) > 0 and _codes[0][-1:] == "#": |
800 _hierarchy = 1 | 810 _hierarchy = 1 |
801 else: | 811 else: |
802 _hierarchy = 2 | 812 _hierarchy = 2 |
803 # "#" and "##" characters at the end of the code are erased | 813 # "#" and "##" characters at the end of the code are erased |
804 # invalid characters are also erased | 814 # invalid characters are also erased |
805 # maximum len 20 characters | 815 # maximum len 20 characters |
806 _codes = [self.validateCode(_code) for _code in _codes] | 816 _codes = [self.validateCode(_code) for _code in _codes] |
807 # empty codes are ignored | 817 # empty codes are ignored |
808 while u"" in _codes: | 818 while "" in _codes: |
809 _codes.remove(u"") | 819 _codes.remove("") |
810 if len(_codes) > 0: | 820 if len(_codes) > 0: |
811 #TODO: test this | 821 #TODO: test this |
812 _code = _codes[0] | 822 _code = _codes[0] |
813 _synonyms = [synonym.encode("utf8") for synonym in _codes] | 823 _synonyms = [synonym for synonym in _codes] |
814 else: | 824 else: |
815 print(_("Record C without a valid code").encode("utf-8") ) | 825 _tuni = _("Record C without a valid code") |
826 print(_tuni) | |
816 return | 827 return |
817 # _____Unit_____ | 828 # _____Unit_____ |
818 # nothing to do | 829 # nothing to do |
819 # _____Summary_____ | 830 # _____Summary_____ |
820 # nothing to do | 831 # nothing to do |
821 # _____Price_____ and _____Dates_____ | 832 # _____Price_____ and _____Dates_____ |
822 # last \ is erased | 833 # last \ is erased |
823 if len(_dates) > 0 and _dates[-1] == u"\\": | 834 if len(_dates) > 0 and _dates[-1] == "\\": |
824 _dates = _dates[:-1] | 835 _dates = _dates[:-1] |
825 if len(_prices) > 0 and _prices[-1] == u"\\": | 836 if len(_prices) > 0 and _prices[-1] == "\\": |
826 _prices = _prices[:-1] | 837 _prices = _prices[:-1] |
827 interface.updateGui() | 838 interface.updateGui() |
828 _dates = _dates.split(u"\\") | 839 _dates = _dates.split("\\") |
829 _prices = _prices.split(u"\\") | 840 _prices = _prices.split("\\") |
830 # number of prices = number of titles in "V" line | 841 # number of prices = number of titles in "V" line |
831 # if there are no sufficient prices it takes the last price defined | 842 # if there are no sufficient prices it takes the last price defined |
832 _title_num = len(self.__budget.getTitleList()[1]) | 843 _title_num = len(self.__budget.getTitleList()[1]) |
833 if _title_num == 0: _title_num = 1 | 844 if _title_num == 0: _title_num = 1 |
834 if len(_prices) > _title_num: _prices = _prices[:_title_num] | 845 if len(_prices) > _title_num: _prices = _prices[:_title_num] |
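A minimal sketch of the hierarchy rule applied to the first code of a ~C record, as shown earlier in this hunk (codes are invented sample values):

    def hierarchy(code):
        if code.endswith("##"):
            return 0   # root record
        elif code.endswith("#"):
            return 1   # chapter / subchapter
        return 2       # ordinary record

    for code in ("OBRA##", "01#", "E28EPP010"):
        print(code, hierarchy(code))
    # -> OBRA## 0, 01# 1, E28EPP010 2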
881 # 1 -> None,H | 892 # 1 -> None,H |
882 # 2 -> None,Q,% | 893 # 2 -> None,Q,% |
883 # 3 -> None,MC,MCr,MM,MS,ME,MCu,Mal,ML,M | 894 # 3 -> None,MC,MCr,MM,MS,ME,MCu,Mal,ML,M |
884 interface.updateGui() | 895 interface.updateGui() |
885 if _hierarchy == 0: | 896 if _hierarchy == 0: |
886 if _type == u"OB": | 897 if _type == "OB": |
887 _subtype = _type | 898 _subtype = _type |
888 _type = 0 | 899 _type = 0 |
889 elif _type == u"0" or _type == u"": | 900 elif _type == "0" or _type == "": |
890 _subtype = u"" | 901 _subtype = "" |
891 _type = 0 | 902 _type = 0 |
892 else: | 903 else: |
893 print(utils.mapping(_("Incorrect type ($1) in the code $2"), | 904 _tuni = _("Incorrect type ($1) in the code $2") |
894 (_type.encode("utf8"), | 905 _uni = utils.mapping(_tuni, (_type, _code)) |
895 _code.encode("utf8"))).encode("utf-8") ) | 906 print(_uni) |
896 _type = 0 | 907 _type = 0 |
897 _subtype = u"" | 908 _subtype = "" |
898 elif _hierarchy == 1: | 909 elif _hierarchy == 1: |
899 if _type == u"PU": | 910 if _type == "PU": |
900 _subtype = _type | 911 _subtype = _type |
901 _type = 0 | 912 _type = 0 |
902 elif _type == u"0" or _type == u"": | 913 elif _type == "0" or _type == "": |
903 _subtype = u"" | 914 _subtype = "" |
904 _type = 0 | 915 _type = 0 |
905 else: | 916 else: |
906 print(utils.mapping(_("Incorrect type ($1) in the code $2"), | 917 _tuni = _("Incorrect type ($1) in the code $2") |
907 (_type.encode("utf8"), | 918 _uni = utils.mapping(_tuni, (_type, _code)) |
908 _code.encode("utf8"))).encode("utf-8") ) | 919 print(_uni) |
909 _type = 0 | 920 _type = 0 |
910 _subtype = u"" | 921 _subtype = "" |
911 else: | 922 else: |
912 if _type == u"EA" or _type == u"EU" or _type == u"EC" or \ | 923 if _type == "EA" or _type == "EU" or _type == "EC" or \ |
913 _type == u"EF" or _type == u"PA": | 924 _type == "EF" or _type == "PA": |
914 _subtype = _type | 925 _subtype = _type |
915 _type = 0 | 926 _type = 0 |
916 elif _type == u"H": | 927 elif _type == "H": |
917 _subtype = _type | 928 _subtype = _type |
918 _type = 1 | 929 _type = 1 |
919 elif _type == u"Q" or _type == u"%": | 930 elif _type == "Q" or _type == "%": |
920 _subtype = _type | 931 _subtype = _type |
921 _type = 2 | 932 _type = 2 |
922 elif _type == u"MC" or _type == u"MCr" or _type == u"MM" or \ | 933 elif _type == "MC" or _type == "MCr" or _type == "MM" or \ |
923 _type == u"MS" or _type == u"ME" or _type == u"MCu" or \ | 934 _type == "MS" or _type == "ME" or _type == "MCu" or \ |
924 _type == u"Mal" or _type == u"ML" or _type == u"M": | 935 _type == "Mal" or _type == "ML" or _type == "M": |
925 _subtype = _type | 936 _subtype = _type |
926 _type = 3 | 937 _type = 3 |
927 elif _type == u"0" or _type == u"1" or _type == u"2" or \ | 938 elif _type == "0" or _type == "1" or _type == "2" or \ |
928 _type == u"3": | 939 _type == "3": |
929 _subtype = u"" | 940 _subtype = "" |
930 _type = int(_type) | 941 _type = int(_type) |
931 elif _type == u"": | 942 elif _type == "": |
932 _subtype = u"" | 943 _subtype = "" |
933 _type = 0 | 944 _type = 0 |
934 else: | 945 else: |
935 print(utils.mapping(_("Incorrect type ($1) in the code $2"), | 946 _tuni = _("Incorrect type ($1) in the code $2") |
936 (_type.encode("utf8"), | 947 _uni = utils.mapping(_tuni, (_type, _code)) |
937 _code.encode("utf8"))).encode("utf-8") ) | 948 print(_uni) |
938 _type = 0 | 949 _type = 0 |
939 _subtype = u"" | 950 _subtype = "" |
940 self.__budget.setRecord(_code.encode("utf8"), _synonyms, _hierarchy, | 951 self.__budget.setRecord(_code, _synonyms, _hierarchy, |
941 _unit.encode("utf8"), _summary.encode("utf8"), | 952 _unit, _summary, |
942 _prices, _dates, _type, _subtype.encode("utf8")) | 953 _prices, _dates, _type, _subtype) |
943 self.__statistics.valid = self.__statistics.valid + 1 | 954 self.__statistics.valid = self.__statistics.valid + 1 |
944 | 955 |
945 def _parseDY(self, field_list, interface): | 956 def _parseDY(self, field_list, interface): |
946 """_parseDY(field_list) | 957 """_parseDY(field_list) |
947 | 958 |
956 if len(field_list) > 3: | 967 if len(field_list) > 3: |
957 field_list = field_list[:3] | 968 field_list = field_list[:3] |
958 # If there are no sufficient fields, the fields are added | 969 # If there are no sufficient fields, the fields are added |
959 # with empty value:"" | 970 # with empty value:"" |
960 else: | 971 else: |
961 field_list = field_list + [u""]*(3-len(field_list)) | 972 field_list = field_list + [""]*(3-len(field_list)) |
962 # control characters are erased: end of line, tab, space | 973 # control characters are erased: end of line, tab, space |
963 # _____Fields_____ | 974 # _____Fields_____ |
964 _record_type = field_list[0] | 975 _record_type = field_list[0] |
965 _code = self.delete_control_space(field_list[1]) | 976 _code = self.delete_control_space(field_list[1]) |
966 _children = self.delete_control_space(field_list[2]) | 977 _children = self.delete_control_space(field_list[2]) |
969 # "#" and "##" characters at the end of the code are erased | 980 # "#" and "##" characters at the end of the code are erased |
970 # invalid characters are also erased | 981 # invalid characters are also erased |
971 _code = self.validateCode(_code) | 982 _code = self.validateCode(_code) |
972 # _____children_____ | 983 # _____children_____ |
973 # TODO: test the number of decimals in factor and yield values | 984 # TODO: test the number of decimals in factor and yield values |
974 _children = _children.split(u"\\") | 985 _children = _children.split("\\") |
975 _children_list = [ ] | 986 _children_list = [ ] |
976 _child_index = 0 | 987 _child_index = 0 |
977 interface.updateGui() | 988 interface.updateGui() |
978 while _child_index < len(_children)-3: | 989 while _child_index < len(_children)-3: |
979 # _____subfields_____ | 990 # _____subfields_____ |
981 _factor = _children[_child_index+1] | 992 _factor = _children[_child_index+1] |
982 _yield = _children[_child_index+2] | 993 _yield = _children[_child_index+2] |
983 # _____child_code_____ | 994 # _____child_code_____ |
984 _child_code = self.validateCode(_child_code) | 995 _child_code = self.validateCode(_child_code) |
985 # _____factor_____ | 996 # _____factor_____ |
986 if _factor != u"": | 997 if _factor != "": |
987 try: | 998 try: |
988 _factor = float(_factor) | 999 _factor = float(_factor) |
989 except ValueError: | 1000 except ValueError: |
990 print(utils.mapping(_("ValueError loadig the "\ | 1001 _tuni = _("ValueError loadig the "\ |
991 "descomposition of the record $1, the factor "\ | 1002 "descomposition of the record $1, the factor "\ |
992 "of the child $2 must be a float number and "\ | 1003 "of the child $2 must be a float number and "\ |
993 "can not be $3, seted default value 1.0"), | 1004 "can not be $3, seted default value 1.0") |
994 (_code.encode("utf8"), _child_code.encode("utf8"), | 1005 _uni = utils.mapping(_tuni, (_code, _child_code, _factor)) |
995 _factor.encode("utf8"))).encode("utf-8") ) | 1006 print(_uni) |
996 _factor = 1.0 | 1007 _factor = 1.0 |
997 #____yield___ | 1008 #____yield___ |
998 if _yield != u"": | 1009 if _yield != "": |
999 try: | 1010 try: |
1000 _yield = float(_yield) | 1011 _yield = float(_yield) |
1001 except ValueError: | 1012 except ValueError: |
1002 print(utils.mapping(_("ValueError loading the "\ | 1013 _tuni = _("ValueError loading the "\ |
1003 "descomposition of the record $1, the yield of "\ | 1014 "descomposition of the record $1, the yield of "\ |
1004 "the child $2, must be a float number and can"\ | 1015 "the child $2, must be a float number and can"\ |
1005 "not be $3, seted default value 1.0"), | 1016 "not be $3, seted default value 1.0") |
1006 (_code.encode("utf8"), _child_code.encode("utf8"), | 1017 _uni = utils.mapping(_tuni, (_code, _child_code, _yield)) |
1007 _yield.encode("utf8"))).encode("utf-8") ) | 1018 print(_uni) |
1008 _yield = 1.0 | 1019 _yield = 1.0 |
1009 if _child_code != u"" and _code != u"": | 1020 if _child_code != "" and _code != "": |
1010 _children_list.append([_child_code, _factor, _yield ]) | 1021 _children_list.append([_child_code, _factor, _yield ]) |
1011 if _record_type == u"D": | 1022 if _record_type == "D": |
1012 _position = _child_index / 3 | 1023 _position = _child_index // 3 |
1013 else: #_record_type == "Y" | 1024 else: #_record_type == "Y" |
1014 _position = -1 | 1025 _position = -1 |
1015 self.__budget.setTree(_code.encode("utf8"), | 1026 self.__budget.setTree(_code, |
1016 _child_code.encode("utf8"), _position, _factor, | 1027 _child_code, _position, _factor, |
1017 _yield, "", "", "", "") | 1028 _yield, "", "", "", "") |
1018 _child_index = _child_index + 3 | 1029 _child_index = _child_index + 3 |
1019 interface.updateGui() | 1030 interface.updateGui() |
1020 self.__statistics.valid = self.__statistics.valid +1 | 1031 self.__statistics.valid = self.__statistics.valid +1 |
1021 | 1032 |
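A minimal sketch of how the decomposition field of a ~D record is consumed in code\factor\yield triplets, with an empty factor or yield defaulting to 1.0 as above (sample field invented):

    children = "MAT1\\\\2.5\\MO1\\1.05\\0.75\\".split("\\")
    i = 0
    while i < len(children) - 3:
        code, factor, yield_ = children[i:i + 3]
        factor = float(factor) if factor != "" else 1.0
        yield_ = float(yield_) if yield_ != "" else 1.0
        print(code, factor, yield_)
        i += 3
    # -> MAT1 1.0 2.5  /  MO1 1.05 0.75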
1042 # _____Code_____ | 1053 # _____Code_____ |
1043 # "#" and "##" characters at the end of the code are erased | 1054 # "#" and "##" characters at the end of the code are erased |
1044 # invalid characters are also erased | 1055 # invalid characters are also erased |
1045 _code = self.validateCode(_code) | 1056 _code = self.validateCode(_code) |
1046 # _____Text_____ | 1057 # _____Text_____ |
1047 self.__budget.setText(_code.encode("utf8"), _text.encode("utf8")) | 1058 self.__budget.setText(_code, _text) |
1048 self.__statistics.valid = self.__statistics.valid + 1 | 1059 self.__statistics.valid = self.__statistics.valid + 1 |
1049 | 1060 |
1050 def _parseMN(self, field_list): | 1061 def _parseMN(self, field_list): |
1051 """_parseMN(field_list) | 1062 """_parseMN(field_list) |
1052 | 1063 |
1053 field_list: field list of the record | 1064 field_list: field list of the record |
1054 0- M or N: MEASURE or ADD MEASURE | 1065 0- M or N: MEASURE or ADD MEASURE |
1055 1- [Parent Code\]Child Code | 1066 1- [Parent Code\\]Child Code |
1056 2- {Path\} | 1067 2- {Path\\} |
1057 3- TOTAL MEASURE | 1068 3- TOTAL MEASURE |
1058 4- {Type\Comment\Unit\Length\Width\Height\} | 1069 4- {Type\\Comment\\Unit\\Length\\Width\\Height\\} |
1059 5- [Label] | 1070 5- [Label] |
1060 """ | 1071 """ |
1061 # _____Number of fields_____ | 1072 # _____Number of fields_____ |
1062 # Any INFORMATION after last field separator is ignored | 1073 # Any INFORMATION after last field separator is ignored |
1063 # The record must have 6 fields | 1074 # The record must have 6 fields |
1064 if len(field_list) > 6: | 1075 if len(field_list) > 6: |
1065 field_list = field_list[:6] | 1076 field_list = field_list[:6] |
1066 # If there are no sufficient fields, the fields are added | 1077 # If there are no sufficient fields, the fields are added |
1067 # with empty value:"" | 1078 # with empty value:"" |
1068 else: | 1079 else: |
1069 field_list = field_list + [u""]*(6-len(field_list)) | 1080 field_list = field_list + [""]*(6-len(field_list)) |
1070 # control characters are erased: end of line, tab, space | 1081 # control characters are erased: end of line, tab, space |
1071 # _____Fields_____ | 1082 # _____Fields_____ |
1072 _record_type = field_list[0] | 1083 _record_type = field_list[0] |
1073 _codes = self.delete_control_space(field_list[1]) | 1084 _codes = self.delete_control_space(field_list[1]) |
1074 _path = self.delete_control_space(field_list[2]) | 1085 _path = self.delete_control_space(field_list[2]) |
1075 _total = self.delete_control_space(field_list[3]) | 1086 _total = self.delete_control_space(field_list[3]) |
1076 _lines = self.delete_control(field_list[4]) | 1087 _lines = self.delete_control(field_list[4]) |
1077 _label = self.delete_control_space(field_list[5]) | 1088 _label = self.delete_control_space(field_list[5]) |
1078 # _____Codes_____ | 1089 # _____Codes_____ |
1079 _code_list = _codes.split(u"\\") | 1090 _code_list = _codes.split("\\") |
1080 # "#" and "##" characters at the end of the code are erased | 1091 # "#" and "##" characters at the end of the code are erased |
1081 # invalid characters are also erased | 1092 # invalid characters are also erased |
1082 if len(_code_list) == 2: | 1093 if len(_code_list) == 2: |
1083 _parent_code = self.validateCode(_code_list[0]) | 1094 _parent_code = self.validateCode(_code_list[0]) |
1084 if _parent_code == u"": | 1095 if _parent_code == "": |
1085 _parent_code = None | 1096 _parent_code = None |
1086 else: | 1097 _child_code = self.validateCode(_code_list[1]) |
1087 _parent_code = _parent_code.encode("utf8") | |
1088 _child_code = self.validateCode(_code_list[1]) | |
1089 elif len(_code_list) == 1: | 1098 elif len(_code_list) == 1: |
1090 _child_code = self.validateCode(_code_list[0]) | 1099 _child_code = self.validateCode(_code_list[0]) |
1091 _parent_code = None | 1100 _parent_code = None |
1092 else: | 1101 else: |
1093 print(utils.mapping(_("Invalid codes in $1 record, codes $2"), | 1102 _tuni = _("Invalid codes in $1 record, codes $2") |
1094 (_record_type.encode("utf8"), | 1103 _uni = utils.mapping(_tuni, (_record_type, _codes)) |
1095 _codes.encode("utf8"))).encode("utf-8") ) | 1104 print(_uni) |
1096 return | 1105 return |
1097 if _child_code == u"": | 1106 if _child_code == "": |
1098 print(utils.mapping(_("Empty child code in $1 record, codes: "\ | 1107 _tuni = _("Empty child code in $1 record, codes: $2") |
1099 "$2"), (_record_type.encode("utf8"), | 1108 _uni = utils.mapping(_tuni, (_record_type, _codes)) |
1100 _codes.encode("utf8"))).encode("utf-8") ) | 1109 print(_uni) |
1101 return | 1110 return |
1102 if _parent_code == None: | 1111 if _parent_code == None: |
1103 # Empty parent code. Non-structured measures. | 1112 # Empty parent code. Non-structured measures. |
1104 pass | 1113 pass |
1105 | 1114 |
1106 # _____Path_____ | 1115 # _____Path_____ |
1107 _path_list = _path.split( u"\\" ) | 1116 _path_list = _path.split( "\\" ) |
1108 if len(_path_list) > 0: | 1117 if len(_path_list) > 0: |
1109 while len(_path_list) > 0 and _path_list[-1] == u"": | 1118 while len(_path_list) > 0 and _path_list[-1] == "": |
1110 _path_list = _path_list[:-1] | 1119 _path_list = _path_list[:-1] |
1111 if len(_path_list) == 0: | 1120 if len(_path_list) == 0: |
1112 # Empty path. Non-structured measures. Path fixed to -2 | 1121 # Empty path. Non-structured measures. Path fixed to -2 |
1113 _path = -2 | 1122 _path = -2 |
1114 else: | 1123 else: |
1115 _path = _path_list[-1] | 1124 _path = _path_list[-1] |
1116 try: | 1125 try: |
1117 _path = int(_path) | 1126 _path = int(_path) |
1118 except ValueError: | 1127 except ValueError: |
1119 print(utils.mapping(_("Invalid path in $1 record, "\ | 1128 _tuni = _("Invalid path in $1 record, codes $2") |
1120 "codes $2"), (_record_type.encode("utf8"), | 1129 _uni = utils.mapping(_tuni, (_record_type, _codes)) |
1121 _codes.encode("utf8"))).encode("utf-8") ) | 1130 print(_uni) |
1122 return | 1131 return |
1123 if _path > 0: | 1132 if _path > 0: |
1124 _path -= 1 | 1133 _path -= 1 |
1125 else: | 1134 else: |
1126 _path = -2 | 1135 _path = -2 |
1127 # _____Total_____ | 1136 # _____Total_____ |
1128 try: | 1137 try: |
1129 _total = float(_total) | 1138 _total = float(_total) |
1130 except ValueError: | 1139 except ValueError: |
1131 print(utils.mapping(_("Invalid Total Measure value in $1 "\ | 1140 _tuni = _("Invalid Total Measure value in $1 "\ |
1132 "record, codes $2. Total fixed to 0."), | 1141 "record, codes $2. Total fixed to 0.") |
1133 (_record_type.encode("utf8"), | 1142 _uni = utils.mapping(_tuni, (_record_type, _codes)) |
1134 _codes.encode("utf8"))).encode("utf-8") ) | 1143 print(_uni) |
1135 _total = 0 | 1144 _total = 0 |
1136 # _____Measure lines_____ | 1145 # _____Measure lines_____ |
1137 _lines = _lines.split(u"\\") | 1146 _lines = _lines.split("\\") |
1138 _line_index = 0 | 1147 _line_index = 0 |
1139 _line_list = [ ] | 1148 _line_list = [ ] |
1140 while _line_index < len(_lines)-6: | 1149 while _line_index < len(_lines)-6: |
1141 _linetype = _lines[_line_index] | 1150 _linetype = _lines[_line_index] |
1142 if _linetype == u"": | 1151 if _linetype == "": |
1143 _linetype = 0 | 1152 _linetype = 0 |
1144 elif _linetype == u"1" or _linetype == u"2" or \ | 1153 elif _linetype == "1" or _linetype == "2" or \ |
1145 _linetype == u"3": | 1154 _linetype == "3": |
1146 _linetype = int(_linetype) | 1155 _linetype = int(_linetype) |
1147 else: | 1156 else: |
1148 _linetype = 0 | 1157 _linetype = 0 |
1149 _comment= _lines[_line_index + 1] | 1158 _comment= _lines[_line_index + 1] |
1150 if _linetype == 3: | 1159 if _linetype == 3: |
1151 # "formula": ".*[^0123456789\.()\+\-\*/\^abcdp ].*" | 1160 # "formula": ".*[^0123456789\.()\+\-\*/\^abcdp ].*" |
1152 if self.__pattern["formula"].match(_comment): | 1161 if self.__pattern["formula"].match(_comment): |
1153 print(utils.mapping(_("The comment is not a formula or "\ | 1162 _tuni = _("The comment is not a formula or "\ |
1154 "its have invalid characters, in the $1 record, "\ | 1163 "its have invalid characters, in the $1 record, "\ |
1155 "codes $2"), (_record_type.encode("utf8"), | 1164 "codes $2") |
1156 _codes.encode("utf8"))).encode("utf-8") ) | 1165 _uni = utils.mapping(_tuni, (_record_type, _codes)) |
1166 print(_uni) | |
1157 return | 1167 return |
1158 else: | 1168 else: |
1159 _formula = _comment.encode("utf8") | 1169 _formula = _comment |
1160 _comment = "" | 1170 _comment = "" |
1161 else: | 1171 else: |
1162 _formula = "" | 1172 _formula = "" |
1163 _comment = _comment.encode("utf8") | |
1164 _units = _lines[_line_index + 2] | 1173 _units = _lines[_line_index + 2] |
1165 _units = self.__pattern["no_float"].sub(u"", _units) | 1174 _units = self.__pattern["no_float"].sub("", _units) |
1166 _length = _lines[_line_index + 3] | 1175 _length = _lines[_line_index + 3] |
1167 _length = self.__pattern["no_float"].sub(u"", _length) | 1176 _length = self.__pattern["no_float"].sub("", _length) |
1168 _width = _lines[_line_index + 4] | 1177 _width = _lines[_line_index + 4] |
1169 _width = self.__pattern["no_float"].sub(u"", _width) | 1178 _width = self.__pattern["no_float"].sub("", _width) |
1170 _height = _lines[_line_index + 5] | 1179 _height = _lines[_line_index + 5] |
1171 _height = self.__pattern["no_float"].sub(u"", _height) | 1180 _height = self.__pattern["no_float"].sub("", _height) |
1172 | 1181 |
1173 try: | 1182 try: |
1174 if _units != u"": | 1183 if _units != "": |
1175 _units = float(_units) | 1184 _units = float(_units) |
1176 if _length != u"": _length = float(_length) | 1185 if _length != "": _length = float(_length) |
1177 if _width != u"": _width = float(_width) | 1186 if _width != "": _width = float(_width) |
1178 if _height != u"": _height = float(_height) | 1187 if _height != "": _height = float(_height) |
1179 except ValueError: | 1188 except ValueError: |
1180 print(utils.mapping(_("The measure values are not float "\ | 1189 _tuni = _("The measure values are not float "\ |
1181 "numbers, code $1"), | 1190 "numbers, code $1") |
1182 (_codes.encode("utf8"),)).encode("utf-8") ) | 1191 _uni = utils.mapping(_tuni, (_codes,)) |
1192 print(_uni) | |
1183 return | 1193 return |
1184 # Prevent the units subfield from remaining empty. | 1194 # Prevent the units subfield from remaining empty. |
1185 if (_units == u"" and (_length != u"" or _width != u"" | 1195 if (_units == "" and (_length != "" or _width != "" |
1186 or _height != u"")): | 1196 or _height != "")): |
1187 _units = 1.0 | 1197 _units = 1.0 |
1188 _line_list.append([_linetype, _comment, _units, | 1198 _line_list.append([_linetype, _comment, _units, |
1189 _length, _width, _height, _formula]) | 1199 _length, _width, _height, _formula]) |
1190 _line_index = _line_index + 6 | 1200 _line_index = _line_index + 6 |
1191 self.__budget.setTree(_parent_code, _child_code.encode("utf8"), _path, "", "", | 1201 self.__budget.setTree(_parent_code, _child_code, _path, "", "", |
1192 _total, _line_list, _label.encode("utf8"), | 1202 _total, _line_list, _label, |
1193 _record_type.encode("utf8")) | 1203 _record_type) |
1194 self.__statistics.valid = self.__statistics.valid + 1 | 1204 self.__statistics.valid = self.__statistics.valid + 1 |
1195 | 1205 |
1196 def _parseW(self, field_list): | 1206 def _parseW(self, field_list): |
1197 """_parseW(field_list) | 1207 """_parseW(field_list) |
1198 | 1208 |
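A minimal sketch of how one measure line of a ~M record is read: six subfields per line, type\comment\units\length\width\height, with numeric subfields converted to float when present (sample line invented):

    lines = "\\Ground floor\\2\\4.50\\0.30\\\\".split("\\")
    linetype, comment, units, length, width, height = lines[0:6]

    def to_float(s):
        return float(s) if s != "" else ""

    print(comment, to_float(units), to_float(length), to_float(width))
    # -> Ground floor 2.0 4.5 0.3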
1210 return | 1220 return |
1211 # control characters are erased: end of line, tab, space | 1221 # control characters are erased: end of line, tab, space |
1212 # _____Fields_____ | 1222 # _____Fields_____ |
1213 _code_fields = field_list[0] | 1223 _code_fields = field_list[0] |
1214 # last \ is erased | 1224 # last \ is erased |
1215 if len(_code_fields) and _code_fields[-1] == u"\\": | 1225 if len(_code_fields) and _code_fields[-1] == "\\": |
1216 _code_fields = _code_fields[:-1] | 1226 _code_fields = _code_fields[:-1] |
1217 _code_fields = _code_fields.split(u"\\") | 1227 _code_fields = _code_fields.split("\\") |
1218 _field_dict = {} | 1228 _field_dict = {} |
1219 _field_index = 0 | 1229 _field_index = 0 |
1220 while _field_index < len(_code_fields)-1: | 1230 while _field_index < len(_code_fields)-1: |
1221 # _____subfields_____ | 1231 # _____subfields_____ |
1222 _field_code = _code_fields[_field_index] | 1232 _field_code = _code_fields[_field_index] |
1224 # control characters are erased: end of line, tab, space | 1234 # control characters are erased: end of line, tab, space |
1225 # _____section_code_____ | 1235 # _____section_code_____ |
1226 #"control": "[\t \n\r]" | 1236 #"control": "[\t \n\r]" |
1227 _field_code = self.delete_control_space(_field_code) | 1237 _field_code = self.delete_control_space(_field_code) |
1228 # _____section_title_____ | 1238 # _____section_title_____ |
1229 if _field_code != u"": | 1239 if _field_code != "": |
1230 _e_field_code = _field_code.encode("utf8") | 1240 _field_dict[_field_code] = _field_title |
1231 _e_field_title = _field_title.encode("utf8") | |
1232 _field_dict[_e_field_code] = _e_field_title | |
1233 _field_index = _field_index + 2 | 1241 _field_index = _field_index + 2 |
1234 self.__budget.setSheetFields(_field_dict) | 1242 self.__budget.setSheetFields(_field_dict) |
1235 self.__statistics.valid = self.__statistics.valid +1 | 1243 self.__statistics.valid = self.__statistics.valid +1 |
1236 | 1244 |
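A short sketch of the subfield walk used by _parseW and _parseL above: a backslash-delimited run of code/title pairs is folded into a dict, stepping the index by two and skipping empty codes. The function name is an illustrative assumption.

def pairs_to_dict(field):
    """Fold a 'code\\title\\code\\title\\...' field into {code: title}."""
    if field.endswith("\\"):        # last \ is erased
        field = field[:-1]
    parts = field.split("\\")
    result = {}
    index = 0
    while index < len(parts) - 1:
        code, title = parts[index], parts[index + 1]
        if code != "":
            result[code] = title
        index += 2
    return result

# pairs_to_dict("01\\Materials\\02\\Labour\\")  ->  {'01': 'Materials', '02': 'Labour'}
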
1237 def _parseL(self, field_list): | 1245 def _parseL(self, field_list): |
1251 # _____Number of fields_____ | 1259 # _____Number of fields_____ |
1252 # The record must have at least 3 fields | 1260 # The record must have at least 3 fields |
1253 if len(field_list) < 3: | 1261 if len(field_list) < 3: |
1254 return | 1262 return |
1255 _code = field_list[1] | 1263 _code = field_list[1] |
1256 if _code == u"": | 1264 if _code == "": |
1257 # A: Section Titles | 1265 # A: Section Titles |
1258 # Any INFORMATION after last field separator is ignored | 1266 # Any INFORMATION after last field separator is ignored |
1259 # The record must have 3 fields | 1267 # The record must have 3 fields |
1260 if len(field_list) > 3: | 1268 if len(field_list) > 3: |
1261 field_list = field_list[0:3] | 1269 field_list = field_list[0:3] |
1262 field_list = field_list[1:3] | 1270 field_list = field_list[1:3] |
1263 # _____Fields_____ | 1271 # _____Fields_____ |
1264 _section_codes = field_list[1] | 1272 _section_codes = field_list[1] |
1265 # last \ is erased | 1273 # last \ is erased |
1266 if len(_section_codes) and _section_codes[-1] == u"\\": | 1274 if len(_section_codes) and _section_codes[-1] == "\\": |
1267 _section_codes = _section_codes[:-1] | 1275 _section_codes = _section_codes[:-1] |
1268 _section_codes = _section_codes.split(u"\\") | 1276 _section_codes = _section_codes.split("\\") |
1269 _section_dict = {} | 1277 _section_dict = {} |
1270 _section_index = 0 | 1278 _section_index = 0 |
1271 while _section_index < len(_section_codes)-1: | 1279 while _section_index < len(_section_codes)-1: |
1272 # _____subfields_____ | 1280 # _____subfields_____ |
1273 _section_code = _section_codes[_section_index] | 1281 _section_code = _section_codes[_section_index] |
1276 # control characters are erased: end of line, tab, space | 1284 # control characters are erased: end of line, tab, space |
1277 # _____section_code_____ | 1285 # _____section_code_____ |
1278 _section_code = self.delete_control_space(_section_code) | 1286 _section_code = self.delete_control_space(_section_code) |
1279 # _____section_title_____ | 1287 # _____section_title_____ |
1280 _section_title = self.delete_control_space(_section_title) | 1288 _section_title = self.delete_control_space(_section_title) |
1281 if _section_code != u"": | 1289 if _section_code != "": |
1282 _e_section_code = _section_code.encode("utf8") | 1290 _section_dict[_section_code] = _section_title |
1283 _e_section_title = _section_title.encode("utf8") | |
1284 _section_dict[_e_section_code] = _e_section_title | |
1285 _section_index = _section_index + 2 | 1291 _section_index = _section_index + 2 |
1286 self.__budget.setSheetSections(_section_dict) | 1292 self.__budget.setSheetSections(_section_dict) |
1287 self.__statistics.valid = self.__statistics.valid +1 | 1293 self.__statistics.valid = self.__statistics.valid +1 |
1288 | 1294 |
1289 else: | 1295 else: |
1297 _record_code = self.delete_control_space(field_list[0]) | 1303 _record_code = self.delete_control_space(field_list[0]) |
1298 # "#" and "##" characters at the end of the code are erased | 1304 # "#" and "##" characters at the end of the code are erased |
1299 # invalid characters are also erased | 1305 # invalid characters are also erased |
1300 _record_code = self.validateCode(_record_code) | 1306 _record_code = self.validateCode(_record_code) |
1301 _scodes_text = field_list[1] | 1307 _scodes_text = field_list[1] |
1302 if _scodes_text == u"": | 1308 if _scodes_text == "": |
1303 # TODO: rtf and html files | 1309 # TODO: rtf and html files |
1304 _str = "HTML and RTF files not yet implemented in ~L record" | 1310 _uni = "HTML and RTF files not yet implemented in ~L record" |
1305 print(_str.encode("utf-8") ) | 1311 print(_uni) |
1306 else: | 1312 else: |
1307 # _____Section-code_Section-text_____ | 1313 # _____Section-code_Section-text_____ |
1308 # last \ is erased | 1314 # last \ is erased |
1309 if len(_scodes_text) and _scodes_text[-1] == u"\\": | 1315 if len(_scodes_text) and _scodes_text[-1] == "\\": |
1310 _scodes_text = _scodes_text[:-1] | 1316 _scodes_text = _scodes_text[:-1] |
1311 _scodes_text = _scodes_text.split(u"\\") | 1317 _scodes_text = _scodes_text.split("\\") |
1312 _paragraph_dict = {} | 1318 _paragraph_dict = {} |
1313 _section_dict = {} | 1319 _section_dict = {} |
1314 _section_index = 0 | 1320 _section_index = 0 |
1315 while _section_index < len(_scodes_text)-1: | 1321 while _section_index < len(_scodes_text)-1: |
1316 # _____subfields_____ | 1322 # _____subfields_____ |
1318 _section_text = _scodes_text[_section_index+1] | 1324 _section_text = _scodes_text[_section_index+1] |
1319 # control characters are erased: end of line, tab, space | 1325 # control characters are erased: end of line, tab, space |
1320 # _____section_code_____ | 1326 # _____section_code_____ |
1321 _section_code = self.delete_control_space(_section_code) | 1327 _section_code = self.delete_control_space(_section_code) |
1322 # _____section_text_____ | 1328 # _____section_text_____ |
1323 if _section_code != u"" and _section_text != u"": | 1329 if _section_code != "" and _section_text != "": |
1324 #-# paragraph #-# | 1330 #-# paragraph #-# |
1325 _paragraph_code = _record_code + _section_code + u"*" | 1331 _paragraph_code = _record_code + _section_code + "*" |
1326 _e_paragraph_code = _paragraph_code.encode("utf8") | 1332 _paragraph_dict[_paragraph_code] = _section_text |
1327 _e_section_text = _section_text.encode("utf8") | 1333 _section_dict[_section_code] = _paragraph_code |
1328 _paragraph_dict[_e_paragraph_code] = _e_section_text | |
1329 _e_section_code = _section_code.encode("utf8") | |
1330 _section_dict[_e_section_code] = _e_paragraph_code | |
1331 _section_index = _section_index + 2 | 1334 _section_index = _section_index + 2 |
1332 self.__budget.setSheetParagraphs(_paragraph_dict) | 1335 self.__budget.setSheetParagraphs(_paragraph_dict) |
1333 self.__budget.setSheetRecord(_record_code.encode("utf8"), "*", | 1336 self.__budget.setSheetRecord(_record_code, "*", _section_dict) |
1334 _section_dict) | |
1335 self.__statistics.valid = self.__statistics.valid +1 | 1337 self.__statistics.valid = self.__statistics.valid +1 |
1336 | 1338 |
1337 def _parseQ(self, field_list): | 1339 def _parseQ(self, field_list): |
1338 """_parseQ(field_list) | 1340 """_parseQ(field_list) |
1339 | 1341 |
1358 # "#" and "##" characters at the end of the code are erased | 1360 # "#" and "##" characters at the end of the code are erased |
1359 # invalid characters are also erased | 1361 # invalid characters are also erased |
1360 _record_code = self.validateCode(_record_code) | 1362 _record_code = self.validateCode(_record_code) |
1361 _scodes_pkey = field_list[1] | 1363 _scodes_pkey = field_list[1] |
1362 # last \ is erased | 1364 # last \ is erased |
1363 if len(_scodes_pkey) and _scodes_pkey[-1] == u"\\": | 1365 if len(_scodes_pkey) and _scodes_pkey[-1] == "\\": |
1364 _scodes_pkey = _scodes_pkey[:-1] | 1366 _scodes_pkey = _scodes_pkey[:-1] |
1365 _scodes_pkey = _scodes_pkey.split(u"\\") | 1367 _scodes_pkey = _scodes_pkey.split("\\") |
1366 _field_dict = {} | 1368 _field_dict = {} |
1367 _section_index = 0 | 1369 _section_index = 0 |
1368 while _section_index < len(_scodes_pkey) -1: | 1370 while _section_index < len(_scodes_pkey) -1: |
1369 # _____subfields_____ | 1371 # _____subfields_____ |
1370 _section_code = _scodes_pkey[_section_index] | 1372 _section_code = _scodes_pkey[_section_index] |
1376 # _____section_text_____ | 1378 # _____section_text_____ |
1377 _paragraph_key = self.delete_control_space(_paragraph_key) | 1379 _paragraph_key = self.delete_control_space(_paragraph_key) |
1378 # _____Fields keys_____ | 1380 # _____Fields keys_____ |
1379 _field_keys = self.delete_control_space(_field_keys) | 1381 _field_keys = self.delete_control_space(_field_keys) |
1380 # last ; is erased | 1382 # last ; is erased |
1381 if len(_field_keys) and _field_keys[-1] == u";": | 1383 if len(_field_keys) and _field_keys[-1] == ";": |
1382 _field_keys = _field_keys[:-1] | 1384 _field_keys = _field_keys[:-1] |
1383 _field_keys_list = _field_keys.split(u";") | 1385 _field_keys_list = _field_keys.split(";") |
1384 for _field_key in _field_keys_list: | 1386 for _field_key in _field_keys_list: |
1385 if _field_key != u"" and _section_code != u"" and \ | 1387 if _field_key != "" and _section_code != "" and \ |
1386 _paragraph_key != u"": | 1388 _paragraph_key != "": |
1387 if _field_key in _field_dict: | 1389 if _field_key in _field_dict: |
1388 _section_dict = _field_dict[_field_key] | 1390 _section_dict = _field_dict[_field_key] |
1389 else: | 1391 else: |
1390 _section_dict = {} | 1392 _section_dict = {} |
1391 _field_dict[_field_key] = _section_dict | 1393 _field_dict[_field_key] = _section_dict |
1392 _e_section_code = _section_code.encode("utf8") | 1394 _section_dict[_section_code] = _paragraph_code |
1393 _e_paragraph_code = _paragraph_code.encode("utf8") | |
1394 _section_dict[_e_section_code] = _e_paragraph_code | |
1395 _section_index = _section_index + 3 | 1395 _section_index = _section_index + 3 |
1396 for _field, _section_dict in _field_dict.iteritems(): | 1396 for _field, _section_dict in _field_dict.items(): |
1397 self.__budget.setSheetRecord(_record_code.encode("utf8"), | 1397 self.__budget.setSheetRecord(_record_code, _field, _section_dict) |
1398 _field.encode("utf8"), _section_dict) | |
1399 self.__statistics.valid = self.__statistics.valid +1 | 1398 self.__statistics.valid = self.__statistics.valid +1 |
1400 | 1399 |
1401 def _parseJ(self, field_list): | 1400 def _parseJ(self, field_list): |
1402 """_parseJ(field_list) | 1401 """_parseJ(field_list) |
1403 | 1402 |
1420 # _____Fields_____ | 1419 # _____Fields_____ |
1421 # _____Paragraph code_____ | 1420 # _____Paragraph code_____ |
1422 _paragraph_code = self.delete_control_space(field_list[0]) | 1421 _paragraph_code = self.delete_control_space(field_list[0]) |
1423 # _____Paragraph text_____ | 1422 # _____Paragraph text_____ |
1424 _paragraph_text = field_list[1] | 1423 _paragraph_text = field_list[1] |
1425 if _paragraph_text == u"": | 1424 if _paragraph_text == "": |
1426 # TODO: rtf and html files | 1425 # TODO: rtf and html files |
1427 _str = "HTML and RTF files not yet implemented in ~J record" | 1426 _uni = "HTML and RTF files not yet implemented in ~J record" |
1428 print(_str.encode("utf-8") ) | 1427 print(_uni) |
1429 else: | 1428 else: |
1430 self.__budget.setSheetParagraph(_paragraph_code.encode("utf8"), | 1429 self.__budget.setSheetParagraph(_paragraph_code, _paragraph_text) |
1431 _paragraph_text.encode("utf8")) | |
1432 self.__statistics.valid = self.__statistics.valid +1 | 1430 self.__statistics.valid = self.__statistics.valid +1 |
1433 | 1431 |
1434 def _parseG(self, field_list): | 1432 def _parseG(self, field_list): |
1435 """_parseG(field_list) | 1433 """_parseG(field_list) |
1436 | 1434 |
1456 _record_code = self.validateCode(_record_code) | 1454 _record_code = self.validateCode(_record_code) |
1457 # _____Graphic files_____ | 1455 # _____Graphic files_____ |
1458 _grafic_files = self.delete_control(field_list[1]) | 1456 _grafic_files = self.delete_control(field_list[1]) |
1459 # _____subfields_____ | 1457 # _____subfields_____ |
1460 # last \ is erased | 1458 # last \ is erased |
1461 if len(_grafic_files) and _grafic_files[-1] == u"\\": | 1459 if len(_grafic_files) and _grafic_files[-1] == "\\": |
1462 _grafic_files = _grafic_files[:-1] | 1460 _grafic_files = _grafic_files[:-1] |
1463 _grafic_file_list = _grafic_files.split(u"\\") | 1461 _grafic_file_list = _grafic_files.split("\\") |
1464 _tested_grafic_file_list = [] | 1462 _tested_grafic_file_list = [] |
1465 for _grafic_file in _grafic_file_list: | 1463 for _grafic_file in _grafic_file_list: |
1466 _str_grafic_file = _grafic_file.encode("utf8") | 1464 _path = os.path.dirname(self.__filename) |
1467 _path = os.path.dirname(self.__filename).encode("utf8") | 1465 _grafic_file_path = os.path.join(_path, _grafic_file) |
1468 _grafic_file_path = os.path.join(_path, _str_grafic_file) | |
1469 if os.path.exists(_grafic_file_path): | 1466 if os.path.exists(_grafic_file_path): |
1470 _tested_grafic_file_list.append(_grafic_file_path) | 1467 _tested_grafic_file_list.append(_grafic_file_path) |
1471 else: | 1468 else: |
1472 _name_ext = os.path.splitext(_str_grafic_file) | 1469 _name_ext = os.path.splitext(_grafic_file) |
1473 _grafic_file_name = _name_ext[0] | 1470 _grafic_file_name = _name_ext[0] |
1474 _grafic_file_ext = _name_ext[1] | 1471 _grafic_file_ext = _name_ext[1] |
1475 _grafic_file_name_u = _grafic_file_name.upper() | 1472 _grafic_file_name_u = _grafic_file_name.upper() |
1476 _grafic_file_name_l = _grafic_file_name.lower() | 1473 _grafic_file_name_l = _grafic_file_name.lower() |
1477 _grafic_file_ext_u = _grafic_file_ext.upper() | 1474 _grafic_file_ext_u = _grafic_file_ext.upper() |
1491 elif os.path.exists(_grafic_file_path_lu): | 1488 elif os.path.exists(_grafic_file_path_lu): |
1492 _tested_grafic_file_list.append(_grafic_file_path_lu) | 1489 _tested_grafic_file_list.append(_grafic_file_path_lu) |
1493 elif os.path.exists(_grafic_file_path_ll): | 1490 elif os.path.exists(_grafic_file_path_ll): |
1494 _tested_grafic_file_list.append(_grafic_file_path_ll) | 1491 _tested_grafic_file_list.append(_grafic_file_path_ll) |
1495 else: | 1492 else: |
1496 print(utils.mapping(_("The file $1 does not exist"), | 1493 _tuni = _("The file $1 does not exist") |
1497 (_grafic_file_path.decode("utf8"),)).encode("utf-8") ) | 1494 _uni = utils.mapping(_tuni, (_grafic_file_path,)) |
1495 print(_uni) | |
1498 if len(_grafic_file_list) > 0: | 1496 if len(_grafic_file_list) > 0: |
1499 for _grafic_file in _tested_grafic_file_list: | 1497 for _grafic_file in _tested_grafic_file_list: |
1500 self.__budget.addFile(_record_code.encode("utf8"), | 1498 self.__budget.addFile(_record_code, _grafic_file, "img", "") |
1501 _grafic_file, "img", "") | |
1502 self.__statistics.valid = self.__statistics.valid +1 | 1499 self.__statistics.valid = self.__statistics.valid +1 |
1503 | 1500 |
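The fallback above retries a graphic filename with upper/lower-case combinations of its name and extension, since BC3 files are often produced on case-insensitive filesystems. A compact sketch of the same idea; the helper name is an assumption, not part of the module.

import os.path

def find_file_case_variants(directory, filename):
    """Return an existing path trying (name, ext) case combinations, else None."""
    name, ext = os.path.splitext(filename)
    candidates = [name + ext,
                  name.upper() + ext.upper(), name.upper() + ext.lower(),
                  name.lower() + ext.upper(), name.lower() + ext.lower()]
    for candidate in candidates:
        path = os.path.join(directory, candidate)
        if os.path.exists(path):
            return path
    return None
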
1504 def _parseE(self, field_list): | 1501 def _parseE(self, field_list): |
1505 """_parseE(field_list) | 1502 """_parseE(field_list) |
1506 | 1503 |
1521 if len(field_list) > 6: | 1518 if len(field_list) > 6: |
1522 field_list = field_list[1:6] | 1519 field_list = field_list[1:6] |
1523 # If there are no sufficient fields, the fields are added | 1520 # If there are no sufficient fields, the fields are added |
1524 # with empty value:"" | 1521 # with empty value:"" |
1525 else: | 1522 else: |
1526 field_list = field_list[1:] + [u""]*(6-len(field_list)) | 1523 field_list = field_list[1:] + [""]*(6-len(field_list)) |
1527 # _____Fields_____ | 1524 # _____Fields_____ |
1528 # _____company Code_____ | 1525 # _____company Code_____ |
1529 _company_code = self.delete_control_space(field_list[0]) | 1526 _company_code = self.delete_control_space(field_list[0]) |
1530 if _company_code == u"": | 1527 if _company_code == "": |
1531 return | 1528 return |
1532 # _____Summary_____ | 1529 # _____Summary_____ |
1533 | 1530 |
1534 _sumamary = self.delete_control(field_list[1]) | 1531 _sumamary = self.delete_control(field_list[1]) |
1535 # _____Name_____ | 1532 # _____Name_____ |
1536 _name = self.delete_control(field_list[2]) | 1533 _name = self.delete_control(field_list[2]) |
1537 # _____local_offices_____ | 1534 # _____local_offices_____ |
1538 _local_offices = self.delete_control(field_list[3]) | 1535 _local_offices = self.delete_control(field_list[3]) |
1539 # _____subfields of local_offices_____ | 1536 # _____subfields of local_offices_____ |
1540 # last \ is erased | 1537 # last \ is erased |
1541 if len(_local_offices) and _local_offices[-1] == u"\\": | 1538 if len(_local_offices) and _local_offices[-1] == "\\": |
1542 _local_offices = _local_offices[:-1] | 1539 _local_offices = _local_offices[:-1] |
1543 _local_offices_list = _local_offices.split(u"\\") | 1540 _local_offices_list = _local_offices.split("\\") |
1544 # If there are no sufficient subfields, the subfields are added | 1541 # If there are no sufficient subfields, the subfields are added |
1545 # with empty value | 1542 # with empty value |
1546 _nsub = len(_local_offices_list) % 10 | 1543 _nsub = len(_local_offices_list) % 10 |
1547 if _nsub != 0: | 1544 if _nsub != 0: |
1548 _local_offices_list = _local_offices_list + \ | 1545 _local_offices_list = _local_offices_list + \ |
1549 [u""]*(10-_nsub) | 1546 [""]*(10-_nsub) |
1550 _local_offices = [] | 1547 _local_offices = [] |
1551 _local_offices_index = 0 | 1548 _local_offices_index = 0 |
1552 while _local_offices_index < len(_local_offices_list)-9: | 1549 while _local_offices_index < len(_local_offices_list)-9: |
1553 # _____subfields_____ | 1550 # _____subfields_____ |
1554 _type = _local_offices_list[_local_offices_index] | 1551 _type = _local_offices_list[_local_offices_index] |
1558 _town = _local_offices_list[_local_offices_index+4] | 1555 _town = _local_offices_list[_local_offices_index+4] |
1559 _province = _local_offices_list[_local_offices_index+5] | 1556 _province = _local_offices_list[_local_offices_index+5] |
1560 _country = _local_offices_list[_local_offices_index+6] | 1557 _country = _local_offices_list[_local_offices_index+6] |
1561 _phone = _local_offices_list[_local_offices_index+7] | 1558 _phone = _local_offices_list[_local_offices_index+7] |
1562 # last ; is erased | 1559 # last ; is erased |
1563 if len(_phone) and _phone[-1] == u";": | 1560 if len(_phone) and _phone[-1] == ";": |
1564 _phone = _phone[:-1] | 1561 _phone = _phone[:-1] |
1565 _phone_list = _phone.split(u";") | 1562 _phone_list = _phone.split(";") |
1566 _phone_list = [_phone.encode("utf8") for _phone in _phone_list] | |
1567 _fax = _local_offices_list[_local_offices_index+8] | 1563 _fax = _local_offices_list[_local_offices_index+8] |
1568 # last ; is erased | 1564 # last ; is erased |
1569 if len(_fax) and _fax[-1] == u";": | 1565 if len(_fax) and _fax[-1] == ";": |
1570 _fax = _fax[:-1] | 1566 _fax = _fax[:-1] |
1571 _fax_list = _fax.split(u";") | 1567 _fax_list = _fax.split(";") |
1572 _fax_list = [_fax.encode("utf8") for _fax in _fax_list] | |
1573 _contact_person = _local_offices_list[_local_offices_index+9] | 1568 _contact_person = _local_offices_list[_local_offices_index+9] |
1574 if _type != u"" or _subname != u"" or _address != u"" or \ | 1569 if _type != "" or _subname != "" or _address != "" or \ |
1575 _postal_code != u"" or _town != u"" or _province != u"" or \ | 1570 _postal_code != "" or _town != "" or _province != "" or \ |
1576 _country != u"" or _phone != u"" or _fax != u"" or \ | 1571 _country != "" or _phone != "" or _fax != "" or \ |
1577 _contact_person != u"": | 1572 _contact_person != "": |
1578 _local_offices.append([_type.encode("utf8"), | 1573 _local_offices.append([_type, |
1579 _subname.encode("utf8"), | 1574 _subname, |
1580 _address.encode("utf8"), | 1575 _address, |
1581 _postal_code.encode("utf8"), | 1576 _postal_code, |
1582 _town.encode("utf8"), | 1577 _town, |
1583 _province.encode("utf8"), | 1578 _province, |
1584 _country.encode("utf8"), | 1579 _country, |
1585 _phone_list, | 1580 _phone_list, |
1586 _fax_list, | 1581 _fax_list, |
1587 _contact_person.encode("utf8")]) | 1582 _contact_person]) |
1588 _local_offices_index = _local_offices_index + 10 | 1583 _local_offices_index = _local_offices_index + 10 |
1589 # _____cif web email_____ | 1584 # _____cif web email_____ |
1590 _c_w_e = self.delete_control_space(field_list[4]) | 1585 _c_w_e = self.delete_control_space(field_list[4]) |
1591 # last \ is erased | 1586 # last \ is erased |
1592 if len(_c_w_e) and _c_w_e[-1] == u"\\": | 1587 if len(_c_w_e) and _c_w_e[-1] == "\\": |
1593 _c_w_e = _c_w_e[:-1] | 1588 _c_w_e = _c_w_e[:-1] |
1594 _c_w_e_list = _c_w_e.split(u"\\") | 1589 _c_w_e_list = _c_w_e.split("\\") |
1595 # _____subfields_____ | 1590 # _____subfields_____ |
1596 # If there are no sufficient fields, the fields are added | 1591 # If there are no sufficient fields, the fields are added |
1597 # with empty value:"" | 1592 # with empty value:"" |
1598 _c_w_e_list = _c_w_e_list + [u""]*(3-len(_c_w_e_list)) | 1593 _c_w_e_list = _c_w_e_list + [""]*(3-len(_c_w_e_list)) |
1599 _cif = _c_w_e_list[0] | 1594 _cif = _c_w_e_list[0] |
1600 _web = _c_w_e_list[1] | 1595 _web = _c_w_e_list[1] |
1601 _email = _c_w_e_list[2] | 1596 _email = _c_w_e_list[2] |
1602 self.__budget.setCompany(_company_code.encode("utf8"), | 1597 self.__budget.setCompany(_company_code, |
1603 _sumamary.encode("utf8"), _name.encode("utf8"), | 1598 _sumamary, _name, |
1604 _local_offices, _cif.encode("utf8"), | 1599 _local_offices, _cif, |
1605 _web.encode("utf8"), _email.encode("utf8")) | 1600 _web, _email) |
1606 self.__statistics.valid = self.__statistics.valid +1 | 1601 self.__statistics.valid = self.__statistics.valid +1 |
1607 | 1602 |
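The ~E record packs each local office into ten consecutive subfields, so the list is padded with empty strings up to the next multiple of ten before being walked in steps of ten. A small sketch of that padding; the helper name is hypothetical.

def pad_to_multiple(items, size=10, filler=""):
    """Pad a subfield list with empty strings up to a multiple of `size`."""
    remainder = len(items) % size
    if remainder != 0:
        items = items + [filler] * (size - remainder)
    return items

# pad_to_multiple(["D", "Office", "Street"])  ->  a list of length 10
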
1608 def _parseX(self, field_list): | 1603 def _parseX(self, field_list): |
1609 """_parseX(field_list) | 1604 """_parseX(field_list) |
1610 | 1605 |
1629 field_list = field_list[1:] | 1624 field_list = field_list[1:] |
1630 # _____Fields_____ | 1625 # _____Fields_____ |
1631 # "control": "[\t \n\r]" | 1626 # "control": "[\t \n\r]" |
1632 _field_1 = self.delete_control_space(field_list[0]) | 1627 _field_1 = self.delete_control_space(field_list[0]) |
1633 _field_2 = self.delete_control_space(field_list[1]) | 1628 _field_2 = self.delete_control_space(field_list[1]) |
1634 if _field_1 == u"": | 1629 if _field_1 == "": |
1635 # A) | 1630 # A) |
1636 _field_2_list = _field_2.split(u"\\") | 1631 _field_2_list = _field_2.split("\\") |
1637 _ti_index = 0 | 1632 _ti_index = 0 |
1638 while _ti_index < len(_field_2_list)-3: | 1633 while _ti_index < len(_field_2_list)-3: |
1639 _ti_code = _field_2_list[_ti_index] | 1634 _ti_code = _field_2_list[_ti_index] |
1640 _ti_description = _field_2_list[_ti_index+1] | 1635 _ti_description = _field_2_list[_ti_index+1] |
1641 _ti_unit = _field_2_list[_ti_index+2] | 1636 _ti_unit = _field_2_list[_ti_index+2] |
1642 if _ti_code != "": | 1637 if _ti_code != "": |
1643 self.__budget.addTecInfo(_ti_code.encode("utf8"), | 1638 self.__budget.addTecInfo(_ti_code, |
1644 _ti_description.encode("utf8"), | 1639 _ti_description, |
1645 _ti_unit.encode("utf8")) | 1640 _ti_unit) |
1646 _ti_index = _ti_index + 3 | 1641 _ti_index = _ti_index + 3 |
1647 else: | 1642 else: |
1648 # B) | 1643 # B) |
1649 # "#" and "##" characters at the end of the code are erased | 1644 # "#" and "##" characters at the end of the code are erased |
1650 # invalid characters are also erased | 1645 # invalid characters are also erased |
1651 _record_code = self.validateCode(_field_1) | 1646 _record_code = self.validateCode(_field_1) |
1652 _field_2_list = _field_2.split(u"\\") | 1647 _field_2_list = _field_2.split("\\") |
1653 _ti_index = 0 | 1648 _ti_index = 0 |
1654 _ti_dict = {} | 1649 _ti_dict = {} |
1655 while _ti_index < len(_field_2_list)-2: | 1650 while _ti_index < len(_field_2_list)-2: |
1656 _ti_code = _field_2_list[_ti_index] | 1651 _ti_code = _field_2_list[_ti_index] |
1657 _ti_value = _field_2_list[_ti_index+1] | 1652 _ti_value = _field_2_list[_ti_index+1] |
1658 if _ti_code != u"" and _ti_value != u"": | 1653 if _ti_code != "" and _ti_value != "": |
1659 _ti_dict[_ti_code.encode("utf8")] = _ti_value.encode("utf8") | 1654 _ti_dict[_ti_code] = _ti_value |
1660 _ti_index = _ti_index + 2 | 1655 _ti_index = _ti_index + 2 |
1661 self.__budget.setTecnicalInformation(_record_code.encode("utf8"), | 1656 self.__budget.setTecnicalInformation(_record_code, _ti_dict) |
1662 _ti_dict) | |
1663 self.__statistics.valid = self.__statistics.valid +1 | 1657 self.__statistics.valid = self.__statistics.valid +1 |
1664 | 1658 |
1665 def _parseF(self, field_list): | 1659 def _parseF(self, field_list): |
1666 """_parseF(field_list) | 1660 """_parseF(field_list) |
1667 | 1661 |
1687 _record_code = self.validateCode(_record_code) | 1681 _record_code = self.validateCode(_record_code) |
1688 # _____Graphic files_____ | 1682 # _____Graphic files_____ |
1689 _files = self.delete_control(field_list[1]) | 1683 _files = self.delete_control(field_list[1]) |
1690 # _____subfields_____ | 1684 # _____subfields_____ |
1691 # last \ is erased | 1685 # last \ is erased |
1692 if len(_files) and _files[-1] == u"\\": | 1686 if len(_files) and _files[-1] == "\\": |
1693 _files = _files[:-1] | 1687 _files = _files[:-1] |
1694 _files_list = _files.split(u"\\") | 1688 _files_list = _files.split("\\") |
1695 # adding empty subfields if necessary | 1689 # adding empty subfields if necessary |
1696 if len(_files_list)%3 > 0: | 1690 if len(_files_list)%3 > 0: |
1697 _files_list.extend([u""]*(3 - len(_files_list)%3)) | 1691 _files_list.extend([""]*(3 - len(_files_list)%3)) |
1698 _file_index = 0 | 1692 _file_index = 0 |
1699 _tested_files_list = [] | 1693 _tested_files_list = [] |
1700 while _file_index < len(_files_list)-3: | 1694 while _file_index < len(_files_list)-3: |
1701 _type = _files_list[_file_index].replace(u" ",u"") | 1695 _type = _files_list[_file_index].replace(" ","") |
1702 ## _types = { | 1696 ## _types = { |
1703 ## "0": _("others"), | 1697 ## "0": _("others"), |
1704 ## "1": _("características técnicas y de fabricación"), | 1698 ## "1": _("características técnicas y de fabricación"), |
1705 ## "2": _("manual de colocación, uso y mantenimiento"), | 1699 ## "2": _("manual de colocación, uso y mantenimiento"), |
1706 ## "3": _("certificado/s de elementos y sistemas"), | 1700 ## "3": _("certificado/s de elementos y sistemas"), |
1712 ## "9": _("cálculo de elementos y sistemas"), | 1706 ## "9": _("cálculo de elementos y sistemas"), |
1713 ## "10": _("presentación, datos generales, objetivos, " \ | 1707 ## "10": _("presentación, datos generales, objetivos, " \ |
1714 ## "etc. de empresa"), | 1708 ## "etc. de empresa"), |
1715 ## "11": _("certificado/s de empresa"), | 1709 ## "11": _("certificado/s de empresa"), |
1716 ## "12": _("obras realizadas")} | 1710 ## "12": _("obras realizadas")} |
1717 _types = [u"0", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", | 1711 _types = ["0", "1", "2", "3", "4", "5", "6", "7", "8", |
1718 u"9", u"10", u"11", u"12"] | 1712 "9", "10", "11", "12"] |
1719 if not _type in _types: | 1713 if not _type in _types: |
1720 _type = u"0" | 1714 _type = "0" |
1721 _filenames = _files_list[_file_index + 1] | 1715 _filenames = _files_list[_file_index + 1] |
1722 _description = _files_list[_file_index + 2] | 1716 _description = _files_list[_file_index + 2] |
1723 _file_index += 3 | 1717 _file_index += 3 |
1724 if len(_filenames) and _filenames[-1] == u";": | 1718 if len(_filenames) and _filenames[-1] == ";": |
1725 _filenames = _filenames[:-1] | 1719 _filenames = _filenames[:-1] |
1726 _filenames_list = _filenames.split(u";") | 1720 _filenames_list = _filenames.split(";") |
1727 | 1721 |
1728 _path = os.path.dirname(self.__filename) | 1722 _path = os.path.dirname(self.__filename) |
1729 for _filename in _filenames_list: | 1723 for _filename in _filenames_list: |
1730 _file_path = os.path.join(_path, _filename.encode("utf8")) | 1724 _file_path = os.path.join(_path, _filename) |
1731 if os.path.exists(_file_path): | 1725 if os.path.exists(_file_path): |
1732 _tested_files_list.append([_file_path, _type.encode("utf8"), | 1726 _tested_files_list.append([_file_path, _type, |
1733 _description.encode("utf8")]) | 1727 _description]) |
1734 else: | 1728 else: |
1735 _name_ext = os.path.splitext(_filename) | 1729 _name_ext = os.path.splitext(_filename) |
1736 _file_name = _name_ext[0] | 1730 _file_name = _name_ext[0] |
1737 _file_ext = _name_ext[1] | 1731 _file_ext = _name_ext[1] |
1738 _file_name_u = _file_name.upper() | 1732 _file_name_u = _file_name.upper() |
1747 _file_path_ul = os.path.join(_path, _ul) | 1741 _file_path_ul = os.path.join(_path, _ul) |
1748 _file_path_lu = os.path.join(_path, _lu) | 1742 _file_path_lu = os.path.join(_path, _lu) |
1749 _file_path_ll = os.path.join(_path, _ll) | 1743 _file_path_ll = os.path.join(_path, _ll) |
1750 if os.path.exists(_file_path_uu): | 1744 if os.path.exists(_file_path_uu): |
1751 _tested_files_list.append([_file_path_uu, | 1745 _tested_files_list.append([_file_path_uu, |
1752 _type.encode("utf8"), | 1746 _type, |
1753 _description.encode("utf8")]) | 1747 _description]) |
1754 elif os.path.exists(_file_path_ul): | 1748 elif os.path.exists(_file_path_ul): |
1755 _tested_files_list.append([_file_path_ul, | 1749 _tested_files_list.append([_file_path_ul, |
1756 _type.encode("utf8"), | 1750 _type, |
1757 _description.encode("utf8")]) | 1751 _description]) |
1758 elif os.path.exists(_file_path_lu): | 1752 elif os.path.exists(_file_path_lu): |
1759 _tested_files_list.append([_file_path_lu, | 1753 _tested_files_list.append([_file_path_lu, |
1760 _type.encode("utf8"), | 1754 _type, |
1761 _description.encode("utf8")]) | 1755 _description]) |
1762 elif os.path.exists(_file_path_ll): | 1756 elif os.path.exists(_file_path_ll): |
1763 _tested_files_list.append([_file_path_ll, | 1757 _tested_files_list.append([_file_path_ll, |
1764 _type.encode("utf8"), | 1758 _type, |
1765 _description.encode("utf8")]) | 1759 _description]) |
1766 else: | 1760 else: |
1767 print(utils.mapping(_("The file $1 does not exist"), | 1761 _tuni = _("The file $1 does not exist") |
1768 (_file_path,)).encode("utf-8") ) | 1762 _uni = utils.mapping(_tuni, (_file_path,)) |
1763 print(_uni) | |
1769 if len(_tested_files_list) > 0: | 1764 if len(_tested_files_list) > 0: |
1770 for _file in _tested_files_list: | 1765 for _file in _tested_files_list: |
1771 self.__budget.addFile(_record_code.encode("utf8"), _file[0], | 1766 self.__budget.addFile(_record_code, _file[0], |
1772 _file[1], _file[2]) | 1767 _file[1], _file[2]) |
1773 self.__statistics.valid = self.__statistics.valid +1 | 1768 self.__statistics.valid = self.__statistics.valid +1 |
1774 | 1769 |
1775 def _parseB(self, field_list): | 1770 def _parseB(self, field_list): |
1776 """_parseB(field_list) | 1771 """_parseB(field_list) |
1827 # invalid characters are also erased | 1822 # invalid characters are also erased |
1828 _code = self.validateCode(_code) | 1823 _code = self.validateCode(_code) |
1829 # _____Labels_____ | 1824 # _____Labels_____ |
1830 # last \ is erased | 1825 # last \ is erased |
1831 # TODO: change the other parsers to this: | 1826 # TODO: change the other parsers to this: |
1832 while len(_labels) > 0 and _labels[-1] == u"\\": | 1827 while len(_labels) > 0 and _labels[-1] == "\\": |
1833 _labels = _labels[:-1] | 1828 _labels = _labels[:-1] |
1834 # replace "_" to " " | 1829 # replace "_" to " " |
1835 _labels = _labels.replace(u"_",u" ") | 1830 _labels = _labels.replace("_"," ") |
1836 _label_list = _labels.split(u"\\") | 1831 _label_list = _labels.split("\\") |
1837 for _label in _label_list: | 1832 for _label in _label_list: |
1838 self.__budget.addLabel(_code.encode("utf8"), _label.encode("utf8")) | 1833 self.__budget.addLabel(_code, _label) |
1839 self.__statistics.valid = self.__statistics.valid + 1 | 1834 self.__statistics.valid = self.__statistics.valid + 1 |
1840 | 1835 |
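A sketch of the label handling above: trailing backslashes are stripped, underscores become spaces, and each backslash-separated label is attached to the record code. The add_label callback stands in for budget.addLabel and is an assumption.

def split_labels(labels, code, add_label):
    """Split a '\\'-separated label field and pass each label to add_label(code, label)."""
    while labels.endswith("\\"):    # last \ is erased
        labels = labels[:-1]
    labels = labels.replace("_", " ")   # "_" stands for a space inside a label
    for label in labels.split("\\"):
        add_label(code, label)

# split_labels("Exterior_walls\\Thermal", "E01", lambda c, l: print(c, l))
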
1841 def _parseP(self, field_list): | 1836 def _parseP(self, field_list): |
1842 """_parseP(field_list) | 1837 """_parseP(field_list) |
1843 | 1838 |
1854 """ | 1849 """ |
1855 # TODO: Use global parametric record | 1850 # TODO: Use global parametric record |
1856 if len(field_list) > 2: | 1851 if len(field_list) > 2: |
1857 # delete control characters and spaces | 1852 # delete control characters and spaces |
1858 _family_code = self.delete_control_space(field_list[1]) | 1853 _family_code = self.delete_control_space(field_list[1]) |
1859 if _family_code == u"": # A) Global parametric record | 1854 if _family_code == "": # A) Global parametric record |
1860 # The record must have 3 or 4 fields | 1855 # The record must have 3 or 4 fields |
1861 if len(field_list) > 4: | 1856 if len(field_list) > 4: |
1862 field_list = field_list[0:4] | 1857 field_list = field_list[0:4] |
1863 field_list = field_list[1:] | 1858 field_list = field_list[1:] |
1864 if len(field_list) == 2: | 1859 if len(field_list) == 2: |
1865 field_list.append(u"") | 1860 field_list.append("") |
1866 if len(field_list) != 3: | 1861 if len(field_list) != 3: |
1867 return | 1862 return |
1868 else: # B)Family Parametric record | 1863 else: # B)Family Parametric record |
1869 # The record must have 3 fields | 1864 # The record must have 3 fields |
1870 if len(field_list) > 3: | 1865 if len(field_list) > 3: |
1871 field_list = field_list[0:3] | 1866 field_list = field_list[0:3] |
1872 field_list = field_list[1:] | 1867 field_list = field_list[1:] |
1873 if len(field_list) != 2: | 1868 if len(field_list) != 2: |
1874 print(_("PyArq hates parametric DLLs").encode("utf-8") ) | 1869 _tuni = _("PyArq hates parametric DLLs") |
| 1870 print(_tuni) |
1875 return | 1871 return |
1876 else: | 1872 else: |
1877 return | 1873 return |
1878 # _____Description_____ | 1874 # _____Description_____ |
1879 _description = field_list[1] | 1875 _description = field_list[1] |
1880 if _description == u"": | 1876 if _description == "": |
1881 print(_("PyArq hates parametric DLLs").encode("utf-8") ) | 1877 _tuni = _("PyArq hates parametric DLLs") |
1878 print(_tuni) | |
1882 return | 1879 return |
1883 # Adding last end of line | 1880 # Adding last end of line |
1884 _description = _description + u"\r\n" | 1881 _description = _description + "\r\n" |
1885 # Delete comments | 1882 # Delete comments |
1886 # "comment" : "#.*\r\n" | 1883 # "comment" : "#.*\r\n" |
1887 _description = self.__pattern["comment"].sub(u"\r\n",_description) | 1884 _description = self.__pattern["comment"].sub("\r\n",_description) |
1888 # Tabs to spaces | 1885 # Tabs to spaces |
1889 _description = _description.replace(u"\t",u" ") | 1886 _description = _description.replace("\t"," ") |
1890 # Delete empty lines | 1887 # Delete empty lines |
1891 # "empty_line": r"(\r\n) *\r\n" | 1888 # "empty_line": r"(\r\n) *\r\n" |
1892 while self.__pattern["empty_line"].search(_description): | 1889 while self.__pattern["empty_line"].search(_description): |
1893 _description = self.__pattern["empty_line"].sub( | 1890 _description = self.__pattern["empty_line"].sub( |
1894 lambda x: x.groups()[0], _description) | 1891 lambda x: x.groups()[0], _description) |
1895 # Delete spaces before and after \ | 1892 # Delete spaces before and after \ |
1896 # "space_before_backslash" : r"( )+\\" | 1893 # "space_before_backslash" : r"( )+\\" |
1897 _description = self.__pattern["space_before_backslash"].sub( | 1894 _description = self.__pattern["space_before_backslash"].sub( |
1898 ur"\\",_description) | 1895 r"\\",_description) |
1899 # "space_after_backslash" : r"\\( )+" | 1896 # "space_after_backslash" : r"\\( )+" |
1900 _description = self.__pattern["space_after_backslash"].sub( | 1897 _description = self.__pattern["space_after_backslash"].sub( |
1901 ur"\\",_description) | 1898 r"\\",_description) |
1902 # Join lines that start but do not end with \ | 1899 # Join lines that start but do not end with \ |
1903 _description = u"\r\n" + _description # add leading end of line | 1900 _description = "\r\n" + _description # add leading end of line |
1904 # "start_noend_backslash": "(\r\n\\\.*[^\\\])\r\n" | 1901 # "start_noend_backslash": "(\r\n\\\.*[^\\\])\r\n" |
1905 while self.__pattern["start_noend_backslash"].search(_description): | 1902 while self.__pattern["start_noend_backslash"].search(_description): |
1906 _description = self.__pattern["start_noend_backslash"].sub( | 1903 _description = self.__pattern["start_noend_backslash"].sub( |
1907 lambda x: x.groups()[0], _description) | 1904 lambda x: x.groups()[0], _description) |
1908 # Join lines that end with a + - * / ^ and @ & < > <= >= = <> ! | 1905 # Join lines that end with a + - * / ^ and @ & < > <= >= = <> ! |
1914 while self.__pattern["matricial_var"].search(_description): | 1911 while self.__pattern["matricial_var"].search(_description): |
1915 _description = self.__pattern["matricial_var"].sub( | 1912 _description = self.__pattern["matricial_var"].sub( |
1916 lambda x: x.groups()[0], _description) | 1913 lambda x: x.groups()[0], _description) |
1917 _description = _description[2:] # remove leading end of line | 1914 _description = _description[2:] # remove leading end of line |
1918 #_description = re.sub(r"\\( )+",r"\\",_description) | 1915 #_description = re.sub(r"\\( )+",r"\\",_description) |
1919 _lines = _description.split(u"\r\n") | 1916 _lines = _description.split("\r\n") |
1920 _final_description = u"" | 1917 _final_description = "" |
1921 _pass_line = 0 | 1918 _pass_line = 0 |
1922 for index in range(len(_lines)): | 1919 for index in range(len(_lines)): |
1923 _line = _lines[index] | 1920 _line = _lines[index] |
1924 # Parse lines | 1921 # Parse lines |
1925 if len(_line) != 0: # Delete empty lines | 1922 if len(_line) != 0: # Delete empty lines |
1926 if _pass_line > 0: | 1923 if _pass_line > 0: |
1927 _pass_line = _pass_line -1 | 1924 _pass_line = _pass_line -1 |
1928 _line = u"" | 1925 _line = "" |
1929 elif _line.isspace(): | 1926 elif _line.isspace(): |
1930 _line = u"" | 1927 _line = "" |
1931 elif _line[0] != u"\\": | 1928 elif _line[0] != "\\": |
1932 # Delete spaces out "" delimiter | 1929 # Delete spaces out "" delimiter |
1933 _list = _line.split(u'"') | 1930 _list = _line.split(u'"') |
1934 _final_line = u"" | 1931 _final_line = "" |
1935 for index1 in range(len(_list)): | 1932 for index1 in range(len(_list)): |
1936 if index1 % 2 != 0: | 1933 if index1 % 2 != 0: |
1937 _parcial_line = u'"' + _list[index1] | 1934 _parcial_line = u'"' + _list[index1] |
1938 else: | 1935 else: |
1939 _parcial_line = _list[index1].replace(u" ",u"") | 1936 _parcial_line = _list[index1].replace(" ","") |
1940 _parcial_line = u'"' + _parcial_line | 1937 _parcial_line = u'"' + _parcial_line |
1941 _final_line = _final_line + _parcial_line | 1938 _final_line = _final_line + _parcial_line |
1942 _line = _final_line[1:] | 1939 _line = _final_line[1:] |
1943 _lines[index] = _line | 1940 _lines[index] = _line |
1944 # parse data | 1941 # parse data |
1945 if len(_line) > 2 and _line[:2] == u"::": | 1942 if len(_line) > 2 and _line[:2] == "::": |
1946 # Delete spaces out " delimiter | 1943 # Delete spaces out " delimiter |
1947 #print("__PRECIO__" + _line[2:]) | 1944 #print("__PRECIO__" + _line[2:]) |
1948 pass | 1945 pass |
1949 elif len(_line) > 2 and _line[:2] == u"%:": | 1946 elif len(_line) > 2 and _line[:2] == "%:": |
1950 # Delete spaces out " delimiter | 1947 # Delete spaces out " delimiter |
1951 #print("__%AUX__" + _line[2:]) | 1948 #print("__%AUX__" + _line[2:]) |
1952 pass | 1949 pass |
1953 elif len(_line) > 3 and _line[:3] == u"%%:": | 1950 elif len(_line) > 3 and _line[:3] == "%%:": |
1954 # Delete spaces out " delimiter | 1951 # Delete spaces out " delimiter |
1955 #print("__%%AUX__" + _line[2:] ) | 1952 #print("__%%AUX__" + _line[2:] ) |
1956 pass | 1953 pass |
1957 elif self.__pattern["var"].search(_line): | 1954 elif self.__pattern["var"].search(_line): |
1958 # Delete spaces out " delimiter | 1955 # Delete spaces out " delimiter |
1959 #print( "line =", _line ) | 1956 #print( "line =", _line ) |
1960 while _line.count(u'"') % 2 == 1 and \ | 1957 while _line.count('"') % 2 == 1 and \ |
1961 index + _pass_line + 1 < len(_lines) -1: | 1958 index + _pass_line + 1 < len(_lines) -1: |
1962 _line = _line + _lines[index + _pass_line + 1] | 1959 _line = _line + _lines[index + _pass_line + 1] |
1963 _pass_line = _pass_line + 1 | 1960 _pass_line = _pass_line + 1 |
1964 _search = self.__pattern["var"].search(_line) | 1961 _search = self.__pattern["var"].search(_line) |
1965 if _search is not None: | 1962 if _search is not None: |
1966 _var0 = _search.groups()[0] | 1963 _var0 = _search.groups()[0] |
1967 _var1 = _search.groups()[1] | 1964 _var1 = _search.groups()[1] |
1968 _var = _var0 + u" = " + _var1 | 1965 _var = _var0 + " = " + _var1 |
1969 #print("__VAR__" + str(_var) ) | 1966 #print("__VAR__" + str(_var) ) |
1970 pass | 1967 pass |
1971 else: | 1968 else: |
1972 #print( "no __VAR__", _line ) | 1969 #print( "no __VAR__", _line ) |
1973 pass | 1970 pass |
1976 #_patern = "(^[^:]*):(.*)$" | 1973 #_patern = "(^[^:]*):(.*)$" |
1977 _search = self.__pattern["descomposition"].search(_line) | 1974 _search = self.__pattern["descomposition"].search(_line) |
1978 if _search is not None: | 1975 if _search is not None: |
1979 _var0 = _search.groups()[0] | 1976 _var0 = _search.groups()[0] |
1980 _var1 = _search.groups()[1] | 1977 _var1 = _search.groups()[1] |
1981 _var = _var0 + u":" + _var1 | 1978 _var = _var0 + ":" + _var1 |
1982 #print( "__Descomposición__" + str(_var) ) | 1979 #print( "__Descomposición__" + str(_var) ) |
1983 pass | 1980 pass |
1984 else: | 1981 else: |
1985 #print("no __Descomposición__", _line ) | 1982 #print("no __Descomposición__", _line ) |
1986 pass | 1983 pass |
1987 else: | 1984 else: |
1988 _str = "Parametric: code: " + \ | 1985 _tuni = _("Parametric: code: $1") |
1989 _family_code.encode("utf8") | 1986 _uni = utils.mapping(_tuni, (_family_code,)) |
1990 print(_str.encode("utf-8") ) | 1987 print(_uni) |
1991 _str = "******* Unknown *** : " + _line | 1988 _tuni = _("******* Unknown *** : $1") |
1992 print(_str.encode("utf-8") ) | 1989 _uni = utils.mapping(_tuni, (_line,)) |
1990 print(_uni) | |
1993 if index-10 > 0: | 1991 if index-10 > 0: |
1994 print("-11 : " + _lines[index-11].encode("utf8") ) | 1992 print("-11 : " + _lines[index-11] ) |
1995 if index-10 > 0: | 1993 if index-10 > 0: |
1996 print("-10 : " + _lines[index-10].encode("utf8") ) | 1994 print("-10 : " + _lines[index-10] ) |
1997 if index-9 > 0: | 1995 if index-9 > 0: |
1998 print("-9 : " + _lines[index-9].encode("utf8") ) | 1996 print("-9 : " + _lines[index-9] ) |
1999 if index-8 > 0: | 1997 if index-8 > 0: |
2000 print("-8 : " + _lines[index-8].encode("utf8") ) | 1998 print("-8 : " + _lines[index-8] ) |
2001 if index-7 > 0: | 1999 if index-7 > 0: |
2002 print("-7 : " + _lines[index-7].encode("utf8") ) | 2000 print("-7 : " + _lines[index-7] ) |
2003 if index-6 > 0: | 2001 if index-6 > 0: |
2004 print("-6 : " + _lines[index-6].encode("utf8") ) | 2002 print("-6 : " + _lines[index-6] ) |
2005 if index-5 > 0: | 2003 if index-5 > 0: |
2006 print("-5 : " + _lines[index-5].encode("utf8") ) | 2004 print("-5 : " + _lines[index-5] ) |
2007 if index-4 > 0: | 2005 if index-4 > 0: |
2008 print("-4 : " + _lines[index-4].encode("utf8") ) | 2006 print("-4 : " + _lines[index-4] ) |
2009 if index-3 > 0: | 2007 if index-3 > 0: |
2010 print("-3 : " + _lines[index-3].encode("utf8") ) | 2008 print("-3 : " + _lines[index-3] ) |
2011 if index-2 > 0: | 2009 if index-2 > 0: |
2012 print("-2 : " + _lines[index-2].encode("utf8") ) | 2010 print("-2 : " + _lines[index-2] ) |
2013 if index-1 > 0: | 2011 if index-1 > 0: |
2014 print("-1 : " + _lines[index-1].encode("utf8") ) | 2012 print("-1 : " + _lines[index-1] ) |
2015 print(("-0 :" + _lines[index-0]).encode("utf-8") ) | 2013 print(("-0 :" + _lines[index-0]) ) |
2016 pass | 2014 pass |
2017 else: | 2015 else: |
2018 _parameter_list = _line.split(u"\\")[1:-1] | 2016 _parameter_list = _line.split("\\")[1:-1] |
2019 if len(_parameter_list) >= 2: | 2017 if len(_parameter_list) >= 2: |
2020 if _parameter_list[0] == u"C" or \ | 2018 if _parameter_list[0] == "C" or \ |
2021 _parameter_list[0] == u"COMENTARIO": | 2019 _parameter_list[0] == "COMENTARIO": |
2022 #print( "__COMENTARIO__" + _parameter_list[1]) | 2020 #print( "__COMENTARIO__" + _parameter_list[1]) |
2023 self.__budget.setParametricSelectComment( | 2021 self.__budget.setParametricSelectComment( |
2024 _family_code.encode("utf8"), | 2022 _family_code, |
2025 _parameter_list[1].encode("utf8")) | 2023 _parameter_list[1]) |
2026 elif _parameter_list[0] == u"R" or \ | 2024 elif _parameter_list[0] == "R" or \ |
2027 _parameter_list[0] == u"RESUMEN": | 2025 _parameter_list[0] == "RESUMEN": |
2028 #print( "__RESUMEN__" + _parameter_list[1]) | 2026 #print( "__RESUMEN__" + _parameter_list[1]) |
2029 self.__budget.setParametricSummary( | 2027 self.__budget.setParametricSummary( |
2030 _family_code.encode("utf8"), | 2028 _family_code, |
2031 _parameter_list[1].encode("utf8")) | 2029 _parameter_list[1]) |
2032 elif _parameter_list[0] == u"T" or \ | 2030 elif _parameter_list[0] == "T" or \ |
2033 _parameter_list[0] == u"TEXTO": | 2031 _parameter_list[0] == "TEXTO": |
2034 #print( "__TEXTO__" + _parameter_list[1]) | 2032 #print( "__TEXTO__" + _parameter_list[1]) |
2035 self.__budget.setParametricText( | 2033 self.__budget.setParametricText( |
2036 _family_code.encode("utf8"), | 2034 _family_code, |
2037 _parameter_list[1].encode("utf8")) | 2035 _parameter_list[1]) |
2038 elif _parameter_list[0] == u"P" or \ | 2036 elif _parameter_list[0] == "P" or \ |
2039 _parameter_list[0] == u"PLIEGO": | 2037 _parameter_list[0] == "PLIEGO": |
2040 #print( "__PLIEGO__" + str(_parameter_list[1:]) ) | 2038 #print( "__PLIEGO__" + str(_parameter_list[1:]) ) |
2041 pass | 2039 pass |
2042 elif _parameter_list[0] == u"K" or \ | 2040 elif _parameter_list[0] == "K" or \ |
2043 _parameter_list[0] == u"CLAVES": | 2041 _parameter_list[0] == "CLAVES": |
2044 #print( "__CLAVES__" + str(_parameter_list[1:]) ) | 2042 #print( "__CLAVES__" + str(_parameter_list[1:]) ) |
2045 pass | 2043 pass |
2046 elif _parameter_list[0] == u"F" or \ | 2044 elif _parameter_list[0] == "F" or \ |
2047 _parameter_list[0] == u"COMERCIAL": | 2045 _parameter_list[0] == "COMERCIAL": |
2048 #print( "__COMERCIAL__" + str(_parameter_list[1:]) ) | 2046 #print( "__COMERCIAL__" + str(_parameter_list[1:]) ) |
2049 pass | 2047 pass |
2050 else: | 2048 else: |
2051 #print( "==PARAMETRO==" + str(_parameter_list[:]) ) | 2049 #print( "==PARAMETRO==" + str(_parameter_list[:]) ) |
2052 pass | 2050 pass |
2053 _final_description = _final_description + _line + u"\r\n" | 2051 _final_description = _final_description + _line + "\r\n" |
2054 | 2052 |
2055 #print( _line ) | 2053 #print( _line ) |
2056 # Delete last empty line | 2054 # Delete last empty line |
2057 _description = _final_description[:-2] | 2055 _description = _final_description[:-2] |
2058 _lines = _description.split(u"\r\n") | 2056 _lines = _description.split("\r\n") |
2059 for _line in _lines: | 2057 for _line in _lines: |
2060 pass | 2058 pass |
2061 #print( _line ) | 2059 #print( _line ) |
2062 self.__statistics.valid = self.__statistics.valid + 1 | 2060 self.__statistics.valid = self.__statistics.valid + 1 |
2063 | 2061 |
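A reduced sketch of the clean-up applied to the parametric description above: #-comments are dropped, tabs become spaces, and empty lines are collapsed, using the same kind of patterns the class compiles in __init__. Only two of those patterns are reproduced here.

import re

_comment = re.compile("#.*\r\n")            # "comment" : "#.*\r\n"
_empty_line = re.compile(r"(\r\n) *\r\n")   # "empty_line": r"(\r\n) *\r\n"

def clean_parametric(description):
    """Drop #-comments, turn tabs into spaces and collapse empty lines."""
    description = description + "\r\n"          # ensure a trailing end of line
    description = _comment.sub("\r\n", description)
    description = description.replace("\t", " ")
    while _empty_line.search(description):
        description = _empty_line.sub(lambda m: m.groups()[0], description)
    return description
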
2086 if interface is None: | 2084 if interface is None: |
2087 interface = Interface() | 2085 interface = Interface() |
2088 interface.readFile_set_statistics(self.__statistics) | 2086 interface.readFile_set_statistics(self.__statistics) |
2089 _time = time.time() | 2087 _time = time.time() |
2090 try: | 2088 try: |
2091 _file = open(self.__filename, 'r') | 2089 _file = open(self.__filename, 'rb') |
2092 except IOError: | 2090 except IOError: |
2093 _str = utils.mapping("IOError: $1", (self.__filename,)) | 2091 _tuni = "IOError: $1" |
2094 print( _str.encode("utf-8") ) | 2092 _uni = utils.mapping(_tuni, (self.__filename,)) |
2093 print(_uni) | |
2095 return None | 2094 return None |
2096 _filesize = float(os.path.getsize(self.__filename)) | 2095 _filesize = float(os.path.getsize(self.__filename)) |
2097 if _filesize == 0.0: | 2096 if _filesize == 0.0: |
2098 _str = utils.mapping("Empty File: $1", (self.__filename,)) | 2097 _tuni = "Empty File: $1" |
2099 print( _str.encode("utf-8") ) | 2098 _uni = utils.mapping(_tuni, (self.__filename,)) |
2099 print(_uni) | |
2100 # Todo: Create empty budget | 2100 # Todo: Create empty budget |
2101 return None | 2101 return None |
2102 self.__budget.filename = self.__filename | 2102 self.__budget.filename = self.__filename |
2103 interface.readFile_send_message(utils.mapping(_("Loading file $1"), | 2103 _tuni = _("Loading file $1") |
2104 (self.__filename,)).encode("utf-8")) | 2104 _uni = utils.mapping(_tuni, (self.__filename,)) |
2105 interface.readFile_send_message(_uni) | |
2105 interface.readFile_progress(_file.tell() / _filesize) | 2106 interface.readFile_progress(_file.tell() / _filesize) |
2106 _buffer = _file.read(1000) | 2107 _buffer = _file.read(1000) |
2108 _dbuffer = text(_buffer, self.__character_set) | |
2107 interface.updateGui() | 2109 interface.updateGui() |
2108 # set codepage from V record | 2110 # set codepage from V record |
2109 _record_list = _buffer.split("~") | 2111 _record_list = _dbuffer.split("~") |
2110 registro_V = _record_list[1] | 2112 registro_V = _record_list[1] |
2111 # ~V|[PROPIEDAD_ARCHIVO]|VERSION_FORMATO[\DDMMAAAA]|[PROGRAMA_EMISION]| | 2113 # ~V|[PROPIEDAD_ARCHIVO]|VERSION_FORMATO[\DDMMAAAA]|[PROGRAMA_EMISION]| |
2112 # [CABECERA]\{ ROTULO_IDENTIFICACION \}|[JUEGO_CARACTERES]| | 2114 # [CABECERA]\{ ROTULO_IDENTIFICACION \}|[JUEGO_CARACTERES]| |
2113 # [COMENTARIO]|[TIPO INFORMACIÓN]|[NÚMERO CERTIFICACIÓN]| | 2115 # [COMENTARIO]|[TIPO INFORMACIÓN]|[NÚMERO CERTIFICACIÓN]| |
2114 # [FECHA CERTIFICACIÓN ] | | 2116 # [FECHA CERTIFICACIÓN ] | |
2118 if len(registro_V) > 5: | 2120 if len(registro_V) > 5: |
2119 _version = registro_V[5].strip() | 2121 _version = registro_V[5].strip() |
2120 # remove leading spaces | 2122 # remove leading spaces |
2121 if _version in self.__character_sets_dict: | 2123 if _version in self.__character_sets_dict: |
2122 self.__character_set = self.__character_sets_dict[_version] | 2124 self.__character_set = self.__character_sets_dict[_version] |
2123 interface.readFile_send_message(utils.mapping( | 2125 _tuni = _("FIEBDC character encoding: $1") |
2124 _("FIEBDC character encoding: $1"), | 2126 _uni = utils.mapping(_tuni, (self.__character_set,)) |
2125 (self.__character_set,)).encode("utf8")) | 2127 interface.readFile_send_message(_uni) |
2126 else: | 2128 else: |
2127 interface.readFile_send_message(utils.mapping( | 2129 _tuni = _("This character encoding does not exist in "\ |
2128 _("This character encoding does not exist in "\ | 2130 "FIEBDC3! Default character encoding: $1") |
2129 "FIEBDC3! Default character encoding: $1"), | 2131 _uni = utils.mapping(_tuni, (self.__character_set,)) |
2130 (self.__character_set,)).encode("utf-8")) | 2132 interface.readFile_send_message(_uni) |
2131 else: | 2133 else: |
2132 interface.readFile_send_message(utils.mapping(_( | 2134 _tuni = _("This V record does not have a character encoding! "\ |
2133 "This V record does not have a character encoding! "\ | 2135 "Default character encoding: $1") |
2134 "Default character encoding: $1"), | 2136 _uni = utils.mapping(_tuni, (self.__character_set,)) |
2135 (self.__character_set,)).encode("utf-8")) | 2137 interface.readFile_send_message(_uni) |
2136 else: | 2138 else: |
2137 interface.readFile_send_message(utils.mapping(_( | 2139 _tuni = _("No 'V' record in file! Default character encoding: $1") |
2138 "No 'V' record in file! Default character encoding: "\ | 2140 _uni = utils.mapping(_tuni, (self.__character_set,)) |
2139 "$1"), (self.__character_set,)).encode("utf-8")) | 2141 interface.readFile_send_message(_uni) |
2140 _buffer = unicode(_buffer, self.__character_set) | 2142 _dbuffer = text(_buffer, self.__character_set) |
2141 interface.updateGui() | 2143 interface.updateGui() |
2142 # Any INFORMATION between the beginning of the file and the | 2144 # Any INFORMATION between the beginning of the file and the |
2143 # beginning of the first registry “~” is ignored | 2145 # beginning of the first registry “~” is ignored |
2144 #"after_first_tilde" : "^[^~]*~" | 2146 #"after_first_tilde" : "^[^~]*~" |
2145 _buffer = self.__pattern["after_first_tilde"].sub("",_buffer) | 2147 _dbuffer = self.__pattern["after_first_tilde"].sub("",_dbuffer) |
2146 while _buffer != u"" and not self.__cancel: | 2148 while _dbuffer != "" and not self.__cancel: |
2147 #-# the blank characters (32), tabs (9) and end of line (13 and 10) | 2149 #-# the blank characters (32), tabs (9) and end of line (13 and 10) |
2148 # before the separators '~', '|' are erased. | 2150 # before the separators '~', '|' are erased. |
2149 # They are not deleted before the separator \ because it affects the reading of | 2151 # They are not deleted before the separator \ because it affects the reading of |
2150 # the record ~P | 2152 # the record ~P |
2151 _buffer = self.eraseControlCharacters(_buffer) | 2153 _dbuffer = self.eraseControlCharacters(_dbuffer) |
2152 _record_list = _buffer.split(u"~") | 2154 _record_list = _dbuffer.split("~") |
2153 # The last record can be incomplete unless it is the last one of | 2155 # The last record can be incomplete unless it is the last one of |
2154 # the file | 2156 # the file |
2155 #if len(_record_list) > 1: | 2157 #if len(_record_list) > 1: |
2156 if (_file.tell() / _filesize) != 1.0: | 2158 if (_file.tell() / _filesize) != 1.0: |
2157 # not the end | 2159 # not the end |
2159 else: | 2161 else: |
2160 # The last record | 2162 # The last record |
2161 # The blank characters (32), tabs (9) and end of line | 2163 # The blank characters (32), tabs (9) and end of line |
2162 # (13 and 10) at the end of the file are ignored. | 2164 # (13 and 10) at the end of the file are ignored. |
2163 #"end_control" : "((\r\n)| |\t)+$" | 2165 #"end_control" : "((\r\n)| |\t)+$" |
2164 _record_list[-1] = self.__pattern["end_control"].sub(u"", | 2166 _record_list[-1] = self.__pattern["end_control"].sub("", |
2165 _record_list[-1]) | 2167 _record_list[-1]) |
2166 _last_record = u"" | 2168 _last_record = "" |
2167 for record in _record_list: | 2169 for record in _record_list: |
2168 if self.__cancel: | 2170 if self.__cancel: |
2169 break | 2171 break |
2170 self.parseRecord(record, interface) | 2172 self.parseRecord(record, interface) |
2171 interface.updateGui() | 2173 interface.updateGui() |
2172 interface.readFile_progress(_file.tell() / _filesize) | 2174 interface.readFile_progress(_file.tell() / _filesize) |
2173 _buffer2 = _file.read(100000) | 2175 _buffer2 = _file.read(100000) |
2174 interface.updateGui() | 2176 interface.updateGui() |
2175 _buffer2 = unicode(_buffer2, self.__character_set) | 2177 _dbuffer2 = text(_buffer2, self.__character_set) |
2176 _buffer = _last_record + _buffer2 | 2178 _dbuffer = _last_record + _dbuffer2 |
2177 interface.updateGui() | 2179 interface.updateGui() |
2178 _file.close() | 2180 _file.close() |
2179 if self.__cancel: | 2181 if self.__cancel: |
2180 interface.readFile_cancel() | 2182 interface.readFile_cancel() |
2181 return None | 2183 return None |
2182 else: | 2184 else: |
2183 self.__statistics.time = time.time()-_time | 2185 self.__statistics.time = time.time()-_time |
2184 if self.__statistics.O > 0: | 2186 if self.__statistics.O > 0: |
2185 _str = utils.mapping( | 2187 _tuni = _("$1 unsupported record type O: Commercial Relationship") |
2186 _("$1 unsupported record type O: Commercial Relationship"), | 2188 _uni = utils.mapping(_tuni, (text(self.__statistics.O),)) |
2187 (str(self.__statistics.O),)) | 2189 interface.readFile_send_message(_uni) |
2188 interface.readFile_send_message(_str.encode("utf-8")) | |
2189 if self.__statistics.valid == 0: | 2190 if self.__statistics.valid == 0: |
2190 _str = _("This file is not a valid FIEBDC3 file") | 2191 _tuni = _("This file is not a valid FIEBDC3 file") |
2191 interface.readFile_send_message(_str.encode("utf-8")) | 2192 interface.readFile_send_message(_tuni) |
2192 return None | 2193 return None |
2193 interface.readFile_end() | 2194 interface.readFile_end() |
2194 self._testBudget(self.__budget, interface) | 2195 self._testBudget(self.__budget, interface) |
2195 return None | 2196 return None |
2196 | 2197 |
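An illustrative sketch of the buffered reading loop above, not the module's API: bytes are decoded with the code page detected in the ~V record, records are split on "~", and the possibly incomplete last record is carried over to the next chunk. The generator name and the cp850 default (standing in for the "850" character set) are assumptions.

from io import open

def iter_records(filename, encoding="cp850", chunk_size=100000):
    """Yield FIEBDC records ('~'-separated) from a file, decoding each chunk."""
    pending = ""
    with open(filename, "rb") as handle:
        while True:
            chunk = handle.read(chunk_size)
            if not chunk:
                break
            pending += chunk.decode(encoding)
            records = pending.split("~")
            pending = records.pop()     # the last record may be incomplete
            for record in records:
                if record:
                    yield record
    if pending:
        yield pending
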
2199 | 2200 |
2200 budget: base.obra object | 2201 budget: base.obra object |
2201 Test and repair the budget object after reading it from a bc3 file | 2202 Test and repair the budget object after reading it from a bc3 file |
2202 """ | 2203 """ |
2203 # TODO: more to do here | 2204 # TODO: more to do here |
2204 print( _("Testing budget ...").encode("utf-8") ) | 2205 _tuni = _("Testing budget ...") |
2206 print(_tuni) | |
2205 # Add price to records without price | 2207 # Add price to records without price |
2206 _iter = budget.iter() | 2208 _iter = budget.iter() |
2207 _titlelist = budget.getTitleList()[1] | 2209 _titlelist = budget.getTitleList()[1] |
2208 if len(_titlelist) == 0: | 2210 if len(_titlelist) == 0: |
2209 _titlenum = 1 | 2211 _titlenum = 1 |
2218 for _index in range(0,_leftprices): | 2220 for _index in range(0,_leftprices): |
2219 _root = budget.getRecord(budget.getRoot()) | 2221 _root = budget.getRecord(budget.getRoot()) |
2220 _price = [0.0, _root.getDate(_len_prices + _index)] | 2222 _price = [0.0, _root.getDate(_len_prices + _index)] |
2221 budget.addPriceToRecord(_price,_record) | 2223 budget.addPriceToRecord(_price,_record) |
2222 interface.updateGui() | 2224 interface.updateGui() |
2223 print(_("End Test").encode("utf-8")) | 2225 _tuni = _("End Test") |
2226 print(_tuni) | |
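_testBudget() above pads every record that carries fewer prices than the budget has title columns, appending zero-value prices dated like the root record. A simplified sketch of that repair step; iter() yielding record objects and the record.prices attribute are assumptions, since those details fall outside this hunk:

    def pad_missing_prices(budget):
        root = budget.getRecord(budget.getRoot())
        title_list = budget.getTitleList()[1]
        columns = len(title_list) if len(title_list) > 0 else 1
        for record in budget.iter():   # assumed to yield record objects
            have = len(record.prices)  # assumed accessor for the price list
            for index in range(have, columns):
                budget.addPriceToRecord([0.0, root.getDate(index)], record)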
2224 | 2227 |
2225 def delete_control_space(self, text): | 2228 def delete_control_space(self, text): |
2226 text = self.delete_control(text) | 2229 text = self.delete_control(text) |
2227 text = text.replace(u" ", u"") | 2230 text = text.replace(" ", "") |
2228 return text | 2231 return text |
2229 | 2232 |
2230 def delete_control(self, text): | 2233 def delete_control(self, text): |
2231 text = text.replace(u"\t", u"") | 2234 text = text.replace("\t", "") |
2232 text = text.replace(u"\r", u"") | 2235 text = text.replace("\r", "") |
2233 text = text.replace(u"\n", u"") | 2236 text = text.replace("\n", "") |
2234 return text | 2237 return text |
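The two helpers above strip the control characters that may surround FIEBDC separators; delete_control_space() additionally removes blanks, which numeric fields need. A standalone example of the same logic on sample input:

    def delete_control(text):
        for control in ("\t", "\r", "\n"):
            text = text.replace(control, "")
        return text

    def delete_control_space(text):
        return delete_control(text).replace(" ", "")

    print(delete_control("~V|obra\r\n|FIEBDC-3/2007\t|"))
    # -> ~V|obra|FIEBDC-3/2007|
    print(delete_control_space("1 000.5 \r\n"))
    # -> 1000.5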
2235 | 2238 |
2236 class Interface(object): | 2239 class Interface(object): |
2237 """fiebdc.Interface | 2240 """fiebdc.Interface |
2238 | 2241 |
2276 | 2279 |
2277 message: message from the readFile method | 2280 message: message from the readFile method |
2278 | 2281 |
2279 print( message ) | 2282 print( message ) |
2280 """ | 2283 """ |
2281 print( message.encode("utf-8") ) | 2284 print(message) |
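The change above drops the explicit .encode("utf-8"): with unicode_literals on python 2, or on python 3, message is already a text object and print() writes it directly, whereas printing the encoded value on python 3 would show a bytes literal instead. A small illustration on python 3:

    message = "Testing ñ and á"
    print(message)                   # Testing ñ and á
    print(message.encode("utf-8"))   # b'Testing \xc3\xb1 and \xc3\xa1'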
2282 | 2285 |
2283 def readFile_progress(self, percent): | 2286 def readFile_progress(self, percent): |
2284 """progress(percent) | 2287 """progress(percent) |
2285 | 2288 |
2286 percent: Percentage executed. | 2289 percent: Percentage executed. |
2293 """readFile_end() | 2296 """readFile_end() |
2294 | 2297 |
2295 The readFile method ended successfully | 2298 The readFile method ended successfully |
2296 """ | 2299 """ |
2297 self.endSuccessfully = True | 2300 self.endSuccessfully = True |
2298 print(self.__statistics.encode("utf-8")) | 2301 print(self.__statistics) |
2299 print(("progreso = " + str(self.__progress)).encode("utf-8")) | 2302 _tuni = "progreso = $1" |
2303 _uni = utils.mapping(_tuni, (text(self.__progress),)) | 
2304 print(_uni) | |
2300 | 2305 |
2301 def readFile_cancel(self): | 2306 def readFile_cancel(self): |
2302 """readFile_cancel() | 2307 """readFile_cancel() |
2303 | 2308 |
2304 The readFile method is canceled | 2309 The readFile method is canceled |
2305 """ | 2310 """ |
2306 self.endSuccessfully = False | 2311 self.endSuccessfully = False |
2307 print( _("Process terminated").encode("utf-8") ) | 2312 _tuni = _("Process terminated") |
2308 print(("progreso = " + str(self.__progress)).encode("utf-8")) | 2313 print(_tuni) |
2314 _tuni = "progreso = $1" | |
2315 _uni = utils.mapping(_tuni, (text(self.__progress),)) | 
2316 print(_uni) | 
2309 | 2317 |
2310 def updateGui(self): | 2318 def updateGui(self): |
2311 """updateGui(self) | 2319 """updateGui(self) |
2312 | 2320 |
2313 Some interfaces need to update the gui while doing some time intensive | 2321 Some interfaces need to update the gui while doing some time intensive |
2383 | 2391 |
2384 def __str__(self): | 2392 def __str__(self): |
2385 return self.str() | 2393 return self.str() |
2386 | 2394 |
2387 def str(self): | 2395 def str(self): |
2388 _str = utils.mapping(_("Time to load: $1 seconds"), | 2396 _tuni1 = _("Time to load: $1 seconds") |
2389 (("%.2f" %(self.time)),)) + "\n" + \ | 2397 _uni1 = utils.mapping(_tuni1, (("%.2f" %(self.time)),)) |
2390 utils.mapping(_("Records/Valid Records: $1/$2"), | 2398 _tuni2 = _("Records/Valid Records: $1/$2") |
2391 (str(self.records), str(self.valid))) + "\n" +\ | 2399 _uni2 = utils.mapping(_tuni2, |
2400 (text(self.records), text(self.valid))) | |
2401 _uni = _uni1 + "\n" + \ | |
2402 _uni2 + "\n" +\ | |
2392 "V: %s\n" %(self.V,) + \ | 2403 "V: %s\n" %(self.V,) + \ |
2393 "C: %s\n" %(self.C,) + \ | 2404 "C: %s\n" %(self.C,) + \ |
2394 "D: %s\n" %(self.D,) + \ | 2405 "D: %s\n" %(self.D,) + \ |
2395 "Y: %s\n" %(self.Y,) + \ | 2406 "Y: %s\n" %(self.Y,) + \ |
2396 "M: %s\n" %(self.M,) + \ | 2407 "M: %s\n" %(self.M,) + \ |
2408 "X: %s\n" %(self.X,) + \ | 2419 "X: %s\n" %(self.X,) + \ |
2409 "B: %s\n" %(self.B,) + \ | 2420 "B: %s\n" %(self.B,) + \ |
2410 "F: %s\n" %(self.F,) + \ | 2421 "F: %s\n" %(self.F,) + \ |
2411 "A: %s\n" %(self.A,) + \ | 2422 "A: %s\n" %(self.A,) + \ |
2412 "?: %s\n" %(self.unknow,) | 2423 "?: %s\n" %(self.unknow,) |
2413 return _str.encode("utf8") | 2424 return _uni |
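str() above builds its report with utils.mapping(), whose implementation lies outside this hunk. The sketch below only illustrates the behaviour these calls rely on, namely that "$1", "$2", ... placeholders are replaced by the corresponding items of a tuple of strings; it is not the code in Generic/utils.py:

    def mapping(template, values):
        for position, value in enumerate(values, start=1):
            template = template.replace("$" + str(position), value)
        return template

    print(mapping("Records/Valid Records: $1/$2", ("120", "118")))
    # -> Records/Valid Records: 120/118
    print(mapping("Time to load: $1 seconds", ("0.42",)))
    # -> Time to load: 0.42 seconds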
2414 | 2425 |