
/lettuce/strings.py

https://github.com/OddBloke/lettuce
Python | 186 lines | 155 code | 15 blank | 16 comment | 10 complexity | MD5: 8b3302d6f01a4d799fe721a2a390a8a4
Possible License(s): GPL-3.0, BSD-3-Clause
# -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import time
import unicodedata

def utf8_string(s):
    if isinstance(s, str):
        s = s.decode("utf-8")
    elif isinstance(s, unicode):
        s = s.encode("utf-8")
    return s

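# Illustrative usage (not from the original lettuce source; assumes Python 2,
# where str is a byte string and unicode is a text string):
#   utf8_string('caf\xc3\xa9')  -> u'caf\xe9'    (bytes decoded as UTF-8)
#   utf8_string(u'caf\xe9')     -> 'caf\xc3\xa9'  (text encoded to UTF-8 bytes)
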
def escape_if_necessary(what):
    # wrap a single-character separator in a regex character class
    # so it is matched literally (e.g. u"|" becomes u"[|]")
    what = unicode(what)
    if len(what) == 1:
        what = u"[%s]" % what
    return what

def get_stripped_lines(string, ignore_lines_starting_with=''):
    string = unicode(string)
    lines = [unicode(l.strip()) for l in string.splitlines()]
    if ignore_lines_starting_with:
        filter_func = lambda x: x and not x.startswith(
            ignore_lines_starting_with)
    else:
        filter_func = lambda x: x
    lines = filter(filter_func, lines)
    return lines

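# Illustrative usage (not from the original lettuce source):
#   get_stripped_lines(u"  foo  \n\n# skipped\n bar", ignore_lines_starting_with='#')
#   -> [u'foo', u'bar']   (lines stripped, blanks and '#' comments dropped)
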
def split_wisely(string, sep, strip=False):
    string = unicode(string)
    if strip:
        string = string.strip()
    else:
        string = string.strip("\n")
    sep = unicode(sep)
    regex = re.compile(escape_if_necessary(sep), re.UNICODE | re.M | re.I)
    items = filter(lambda x: x, regex.split(string))
    if strip:
        items = [i.strip() for i in items]
    else:
        items = [i.strip("\n") for i in items]
    return [unicode(i) for i in items]

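# Illustrative usage (not from the original lettuce source):
#   split_wisely(u"| name | age |", u"|", strip=True)
#   -> [u'name', u'age']   (empty pieces are discarded, the rest are stripped)
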
def wise_startswith(string, seed):
    string = unicode(string).strip()
    seed = unicode(seed)
    regex = u"^%s" % re.escape(seed)
    return bool(re.search(regex, string, re.I))


def remove_it(string, what):
    return unicode(re.sub(unicode(what), "", unicode(string)).strip())

def column_width(string):
    l = 0
    for c in unicode(string):
        if unicodedata.east_asian_width(c) in "WF":
            l += 2
        else:
            l += 1
    return l


def rfill(string, times, char=u" ", append=u""):
    string = unicode(string)
    missing = times - column_width(string)
    for x in range(missing):
        string += char
    return unicode(string) + unicode(append)


def getlen(string):
    return column_width(unicode(string)) + 1

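# Illustrative usage (not from the original lettuce source): wide and
# fullwidth East Asian characters count as two columns, which keeps table
# cells aligned in a terminal.
#   column_width(u"name")           -> 4
#   column_width(u"名前")           -> 4   (two wide characters)
#   rfill(u"name", 6, append=u"|")  -> u'name  |'
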
def dicts_to_string(dicts, order):
    escape = "#{%s}" % unicode(time.time())

    def enline(line):
        return unicode(line).replace("|", escape)

    def deline(line):
        return line.replace(escape, '\\|')

    keys_and_sizes = dict([(k, getlen(k)) for k in dicts[0].keys()])
    for key in keys_and_sizes:
        for data in dicts:
            current_size = keys_and_sizes[key]
            value = unicode(data.get(key, ''))
            size = getlen(value)
            if size > current_size:
                keys_and_sizes[key] = size
    names = []
    for key in order:
        size = keys_and_sizes[key]
        name = u" %s" % rfill(key, size)
        names.append(enline(name))
    table = [u"|%s|" % "|".join(names)]
    for data in dicts:
        names = []
        for key in order:
            value = data.get(key, '')
            size = keys_and_sizes[key]
            names.append(enline(u" %s" % rfill(value, size)))
        table.append(u"|%s|" % "|".join(names))
    return deline(u"\n".join(table) + u"\n")

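# Illustrative usage (not from the original lettuce source): renders a list of
# dicts as a Gherkin-style table, padding each column to the width of its
# longest value; a '|' inside a value comes out escaped as '\|'.
#   dicts_to_string([{'name': u'John', 'age': u'22'}], ['name', 'age'])
#   -> u'| name | age |\n| John | 22  |\n'
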
def parse_hashes(lines):
    escape = "#{%s}" % unicode(time.time())

    def enline(line):
        return unicode(line.replace("\\|", escape)).strip()

    def deline(line):
        return line.replace(escape, '|')

    def discard_comments(lines):
        return [line for line in lines if not line.startswith('#')]

    lines = discard_comments(lines)
    lines = map(enline, lines)
    keys = []
    hashes = []
    if lines:
        first_line = lines.pop(0)
        keys = split_wisely(first_line, u"|", True)
        keys = map(deline, keys)
        for line in lines:
            values = split_wisely(line, u"|", True)
            values = map(deline, values)
            hashes.append(dict(zip(keys, values)))
    return keys, hashes

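# Illustrative usage (not from the original lettuce source): the inverse of
# dicts_to_string, turning a Gherkin table into the header row plus one dict
# per data row (an escaped '\|' inside a cell is unescaped back to '|').
#   parse_hashes([u'| name | age |', u'| John | 22 |'])
#   -> ([u'name', u'age'], [{u'name': u'John', u'age': u'22'}])
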
def parse_multiline(lines):
    multilines = []
    in_multiline = False
    for line in lines:
        if line == '"""':
            in_multiline = not in_multiline
        elif in_multiline:
            if line.startswith('"'):
                line = line[1:]
            if line.endswith('"'):
                line = line[:-1]
            multilines.append(line)
    return u'\n'.join(multilines)
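
# Illustrative usage (not from the original lettuce source): collects the
# lines between a pair of '"""' delimiters, dropping a single leading or
# trailing '"' on each collected line.
#   parse_multiline([u'"""', u'first line', u'second line', u'"""'])
#   -> u'first line\nsecond line'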