diff --git a/common/.local/bin/powerline b/common/.local/bin/powerline deleted file mode 100755 index 2d38ac2..0000000 --- a/common/.local/bin/powerline +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/python -# vim:fileencoding=utf-8:noet -'''Powerline prompt and statusline script.''' -import sys -import os - -try: - from powerline.shell import ShellPowerline, get_argparser -except ImportError: - import os - sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) - from powerline.shell import ShellPowerline, get_argparser # NOQA - -if __name__ == '__main__': - args = get_argparser(description=__doc__).parse_args() - powerline = ShellPowerline(args, run_once=True) - rendered = powerline.render( - width=args.width, - side=args.side, - segment_info={'args': args, 'environ': os.environ}, - ) - try: - sys.stdout.write(rendered) - except UnicodeEncodeError: - sys.stdout.write(rendered.encode('utf-8')) diff --git a/common/.local/bin/powerline-lint b/common/.local/bin/powerline-lint deleted file mode 100755 index 6734e5c..0000000 --- a/common/.local/bin/powerline-lint +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/python -# vim:fileencoding=utf-8:noet -'''Powerline configuration checker.''' -import argparse -from powerline.lint import check -import sys - - -parser = argparse.ArgumentParser(description=__doc__) -parser.add_argument('-p', '--config_path', metavar='PATH') - -if __name__ == '__main__': - args = parser.parse_args() - sys.exit(check(args.config_path)) diff --git a/common/.local/bin/powerline-zsh.py b/common/.local/bin/powerline-zsh.py deleted file mode 100755 index ada55b8..0000000 --- a/common/.local/bin/powerline-zsh.py +++ /dev/null @@ -1,325 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import subprocess -import sys -import re -import argparse - - -def warn(msg): - print '[powerline-zsh] ', msg - - -class Color: - # The following link is a pretty good resources for color values: - # http://www.calmar.ws/vim/color-output.png - - PATH_BG = 237 # dark grey - PATH_FG = 250 # light grey - CWD_FG = 254 # nearly-white grey - SEPARATOR_FG = 244 - - REPO_CLEAN_BG = 148 # a light green color - REPO_CLEAN_FG = 0 # black - REPO_DIRTY_BG = 161 # pink/red - REPO_DIRTY_FG = 15 # white - - CMD_PASSED_BG = 236 - CMD_PASSED_FG = 15 - CMD_FAILED_BG = 161 - CMD_FAILED_FG = 15 - - SVN_CHANGES_BG = 148 - SVN_CHANGES_FG = 22 # dark green - - VIRTUAL_ENV_BG = 35 # a mid-tone green - VIRTUAL_ENV_FG = 22 - - -class Powerline: - symbols = { - 'compatible': { - 'separator': u'\u25B6', - 'separator_thin': u'\u276F' - }, - 'patched': { - 'separator': u'\u2B80', - 'separator_thin': u'\u2B81' - }, - 'default': { - 'separator': '⮀', - 'separator_thin': '⮁' - } - } - LSQESCRSQ = '\\[\\e%s\\]' - reset = ' %f%k' - - def __init__(self, mode='default'): - self.separator = Powerline.symbols[mode]['separator'] - self.separator_thin = Powerline.symbols[mode]['separator_thin'] - self.segments = [] - - def color(self, prefix, code): - if prefix == '38': - return '%%F{%s}' % code - elif prefix == '48': - return '%%K{%s}' % code - - def fgcolor(self, code): - return self.color('38', code) - - def bgcolor(self, code): - return self.color('48', code) - - def append(self, segment): - self.segments.append(segment) - - def draw(self): - return (''.join((s[0].draw(self, s[1]) for s in zip(self.segments, self.segments[1:] + [None]))) - + self.reset) - - -class Segment: - def __init__(self, powerline, content, fg, bg, separator=None, separator_fg=None): - self.powerline = powerline - self.content = 
content - self.fg = fg - self.bg = bg - self.separator = separator or powerline.separator - self.separator_fg = separator_fg or bg - - def draw(self, powerline, next_segment=None): - if next_segment: - separator_bg = powerline.bgcolor(next_segment.bg) - else: - separator_bg = powerline.reset - - return ''.join(( - powerline.fgcolor(self.fg), - powerline.bgcolor(self.bg), - self.content, - separator_bg, - powerline.fgcolor(self.separator_fg), - self.separator)) - - -def add_cwd_segment(powerline, cwd, maxdepth, cwd_only=False): - #powerline.append(' \\w ', 15, 237) - home = os.getenv('HOME') - cwd = os.getenv('PWD') - - if cwd.find(home) == 0: - cwd = cwd.replace(home, '~', 1) - - if cwd[0] == '/': - cwd = cwd[1:] - - names = cwd.split('/') - if len(names) > maxdepth: - names = names[:2] + ['⋯ '] + names[2 - maxdepth:] - - if not cwd_only: - for n in names[:-1]: - powerline.append(Segment(powerline, ' %s ' % n, Color.PATH_FG, Color.PATH_BG, powerline.separator_thin, Color.SEPARATOR_FG)) - powerline.append(Segment(powerline, ' %s ' % names[-1], Color.CWD_FG, Color.PATH_BG)) - - -def get_hg_status(): - has_modified_files = False - has_untracked_files = False - has_missing_files = False - output = subprocess.Popen(['hg', 'status'], stdout=subprocess.PIPE).communicate()[0] - for line in output.split('\n'): - if line == '': - continue - elif line[0] == '?': - has_untracked_files = True - elif line[0] == '!': - has_missing_files = True - else: - has_modified_files = True - return has_modified_files, has_untracked_files, has_missing_files - - -def add_hg_segment(powerline, cwd): - branch = os.popen('hg branch 2> /dev/null').read().rstrip() - if len(branch) == 0: - return False - bg = Color.REPO_CLEAN_BG - fg = Color.REPO_CLEAN_FG - has_modified_files, has_untracked_files, has_missing_files = get_hg_status() - if has_modified_files or has_untracked_files or has_missing_files: - bg = Color.REPO_DIRTY_BG - fg = Color.REPO_DIRTY_FG - extra = '' - if has_untracked_files: - extra += '+' - if has_missing_files: - extra += '!' 
- branch += (' ' + extra if extra != '' else '') - powerline.append(Segment(powerline, ' %s ' % branch, fg, bg)) - return True - - -def get_git_status(): - has_pending_commits = True - has_untracked_files = False - detached_head = False - origin_position = "" - current_branch = '' - output = subprocess.Popen(['git', 'status', '-unormal'], stdout=subprocess.PIPE).communicate()[0] - for line in output.split('\n'): - origin_status = re.findall("Your branch is (ahead|behind).*?(\d+) comm", line) - if len(origin_status) > 0: - origin_position = " %d" % int(origin_status[0][1]) - if origin_status[0][0] == 'behind': - origin_position += '⇣' - if origin_status[0][0] == 'ahead': - origin_position += '⇡' - - if line.find('nothing to commit (working directory clean)') >= 0: - has_pending_commits = False - if line.find('Untracked files') >= 0: - has_untracked_files = True - if line.find('Not currently on any branch') >= 0: - detached_head = True - if line.find('On branch') >= 0: - current_branch = re.findall('On branch ([^ ]+)', line)[0] - return has_pending_commits, has_untracked_files, origin_position, detached_head, current_branch - - -def add_git_segment(powerline, cwd): - #cmd = "git branch 2> /dev/null | grep -e '\\*'" - p1 = subprocess.Popen(['git', 'branch'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - p2 = subprocess.Popen(['grep', '-e', '\\*'], stdin=p1.stdout, stdout=subprocess.PIPE) - output = p2.communicate()[0].strip() - if len(output) == 0: - return False - - has_pending_commits, has_untracked_files, origin_position, detached_head, current_branch = get_git_status() - - if len(current_branch) > 0: - branch = current_branch - elif detached_head: - branch = subprocess.Popen(['git', 'describe', '--all', '--contains', '--abbrev=4', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - branch = '((' + branch.communicate()[0].strip() + '))' - else: - return 'master' - - branch += origin_position - - if has_untracked_files: - branch += ' +' - - bg = Color.REPO_CLEAN_BG - fg = Color.REPO_CLEAN_FG - - if has_pending_commits: - bg = Color.REPO_DIRTY_BG - fg = Color.REPO_DIRTY_FG - - powerline.append(Segment(powerline, ' %s ' % branch, fg, bg)) - return True - - -def add_svn_segment(powerline, cwd): - if not os.path.exists(os.path.join(cwd, '.svn')): - return - '''svn info: - First column: Says if item was added, deleted, or otherwise changed - ' ' no modifications - 'A' Added - 'C' Conflicted - 'D' Deleted - 'I' Ignored - 'M' Modified - 'R' Replaced - 'X' an unversioned directory created by an externals definition - '?' item is not under version control - '!' 
item is missing (removed by non-svn command) or incomplete - '~' versioned item obstructed by some item of a different kind - ''' - #TODO: Color segment based on above status codes - try: - #cmd = '"svn status | grep -c "^[ACDIMRX\\!\\~]"' - p1 = subprocess.Popen(['svn', 'status'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - p2 = subprocess.Popen(['grep', '-c', '^[ACDIMRX\\!\\~]'], stdin=p1.stdout, stdout=subprocess.PIPE) - output = p2.communicate()[0].strip() - if len(output) > 0 and int(output) > 0: - changes = output.strip() - powerline.append(Segment(powerline, ' %s ' % changes, Color.SVN_CHANGES_FG, Color.SVN_CHANGES_BG)) - except OSError: - return False - except subprocess.CalledProcessError: - return False - return True - - -def add_repo_segment(powerline, cwd): - for add_repo_segment in [add_git_segment, add_svn_segment, add_hg_segment]: - try: - if add_repo_segment(p, cwd): - return - except subprocess.CalledProcessError: - pass - except OSError: - pass - - -def add_virtual_env_segment(powerline, cwd): - env = os.getenv("VIRTUAL_ENV") - if env is None: - return False - env_name = os.path.basename(env) - bg = Color.VIRTUAL_ENV_BG - fg = Color.VIRTUAL_ENV_FG - powerline.append(Segment(powerline, ' %s ' % env_name, fg, bg)) - return True - - -def add_root_indicator(powerline, error): - bg = Color.CMD_PASSED_BG - fg = Color.CMD_PASSED_FG - if int(error) != 0: - fg = Color.CMD_FAILED_FG - bg = Color.CMD_FAILED_BG - powerline.append(Segment(powerline, ' $ ', fg, bg)) - - -def get_valid_cwd(): - try: - cwd = os.getcwd() - except: - cwd = os.getenv('PWD') # This is where the OS thinks we are - parts = cwd.split(os.sep) - up = cwd - while parts and not os.path.exists(up): - parts.pop() - up = os.sep.join(parts) - try: - os.chdir(up) - except: - warn("Your current directory is invalid.") - sys.exit(1) - warn("Your current directory is invalid. Lowest valid directory: " + up) - return cwd - -if __name__ == '__main__': - arg_parser = argparse.ArgumentParser() - arg_parser.add_argument('--cwd-only', action="store_true") - arg_parser.add_argument('prev_error', nargs='?', default=0) - args = arg_parser.parse_args() - - p = Powerline(mode='default') - cwd = get_valid_cwd() - add_virtual_env_segment(p, cwd) - #p.append(Segment(' \\u ', 250, 240)) - #p.append(Segment(' \\h ', 250, 238)) - add_cwd_segment(p, cwd, 5, args.cwd_only) - add_repo_segment(p, cwd) - add_root_indicator(p, args.prev_error) - sys.stdout.write(p.draw()) - -# vim: set expandtab: diff --git a/common/.local/lib/python2.7/site-packages/Powerline-beta.egg-info/PKG-INFO b/common/.local/lib/python2.7/site-packages/Powerline-beta.egg-info/PKG-INFO deleted file mode 100644 index 46f7b24..0000000 --- a/common/.local/lib/python2.7/site-packages/Powerline-beta.egg-info/PKG-INFO +++ /dev/null @@ -1,63 +0,0 @@ -Metadata-Version: 1.0 -Name: Powerline -Version: beta -Summary: The ultimate statusline/prompt utility. -Home-page: https://github.com/Lokaltog/powerline -Author: Kim Silkebækken -Author-email: kim.silkebaekken+vim@gmail.com -License: UNKNOWN -Description: Powerline - ========= - - :Author: Kim Silkebækken (kim.silkebaekken+vim@gmail.com) - :Source: https://github.com/Lokaltog/powerline - :Version: beta - :Build status: - .. image:: https://api.travis-ci.org/Lokaltog/powerline.png?branch=develop - :target: `travis-build-status`_ - :alt: Build status - - This is the upcoming version of Powerline, implemented in Python. The - project is currently in a stable beta and almost ready for release. 
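
A minimal sketch of how the pieces of the powerline-zsh.py script deleted above fit together. It assumes the Color, Powerline and Segment classes from that script are in scope; the segment text (' ~/dotfiles ', ' master ') is made up for illustration::

    p = Powerline(mode='default')
    p.append(Segment(p, ' ~/dotfiles ', Color.CWD_FG, Color.PATH_BG))
    p.append(Segment(p, ' master ', Color.REPO_CLEAN_FG, Color.REPO_CLEAN_BG))
    p.append(Segment(p, ' $ ', Color.CMD_PASSED_FG, Color.CMD_PASSED_BG))

    # draw() renders zsh escapes: %F{n} sets the foreground colour, %K{n} the
    # background. Each separator is drawn in the finished segment's background
    # colour on top of the *next* segment's background, which is what produces
    # the powerline "arrow" effect.
    prompt = p.draw()  # e.g. '%F{254}%K{237} ~/dotfiles %K{148}%F{237}⮀ ...'
    print(prompt)

In the script itself this rendered string is written to stdout, and the previous command's exit status is passed in as the prev_error argument so the final ' $ ' segment is coloured red after a failing command.
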
- - * Consult the `documentation - `_ for more information and - installation instructions. - * Check out `powerline-fonts `_ - for pre-patched versions of popular coding fonts. - - .. _travis-build-status: https://travis-ci.org/Lokaltog/powerline - - Screenshots - ----------- - - Vim statusline - ^^^^^^^^^^^^^^ - - **Mode-dependent highlighting** - - * .. image:: https://raw.github.com/Lokaltog/powerline/develop/docs/source/_static/img/pl-mode-normal.png - :alt: Normal mode - * .. image:: https://raw.github.com/Lokaltog/powerline/develop/docs/source/_static/img/pl-mode-insert.png - :alt: Insert mode - * .. image:: https://raw.github.com/Lokaltog/powerline/develop/docs/source/_static/img/pl-mode-visual.png - :alt: Visual mode - * .. image:: https://raw.github.com/Lokaltog/powerline/develop/docs/source/_static/img/pl-mode-replace.png - :alt: Replace mode - - **Automatic truncation of segments in small windows** - - * .. image:: https://raw.github.com/Lokaltog/powerline/develop/docs/source/_static/img/pl-truncate1.png - :alt: Truncation illustration - * .. image:: https://raw.github.com/Lokaltog/powerline/develop/docs/source/_static/img/pl-truncate2.png - :alt: Truncation illustration - * .. image:: https://raw.github.com/Lokaltog/powerline/develop/docs/source/_static/img/pl-truncate3.png - :alt: Truncation illustration - - ---- - - The font in the screenshots is `Pragmata Pro`_ by Fabrizio Schiavi. - - .. _`Pragmata Pro`: http://www.fsd.it/fonts/pragmatapro.htm - -Platform: UNKNOWN diff --git a/common/.local/lib/python2.7/site-packages/Powerline-beta.egg-info/SOURCES.txt b/common/.local/lib/python2.7/site-packages/Powerline-beta.egg-info/SOURCES.txt deleted file mode 100644 index 421a0d9..0000000 --- a/common/.local/lib/python2.7/site-packages/Powerline-beta.egg-info/SOURCES.txt +++ /dev/null @@ -1,98 +0,0 @@ -MANIFEST.in -README.rst -Powerline.egg-info/PKG-INFO -Powerline.egg-info/SOURCES.txt -Powerline.egg-info/dependency_links.txt -Powerline.egg-info/not-zip-safe -Powerline.egg-info/requires.txt -Powerline.egg-info/top_level.txt -powerline/__init__.py -powerline/colorscheme.py -powerline/ipython.py -powerline/matcher.py -powerline/renderer.py -powerline/segment.py -powerline/shell.py -powerline/theme.py -powerline/vim.py -powerline/bindings/__init__.py -powerline/bindings/awesome/powerline-awesome.py -powerline/bindings/awesome/powerline.lua -powerline/bindings/bash/powerline.sh -powerline/bindings/ipython/__init__.py -powerline/bindings/ipython/post_0_11.py -powerline/bindings/ipython/pre_0_11.py -powerline/bindings/qtile/__init__.py -powerline/bindings/qtile/widget.py -powerline/bindings/tmux/powerline.conf -powerline/bindings/vim/__init__.py -powerline/bindings/vim/plugin/powerline.vim -powerline/bindings/zsh/__init__.py -powerline/bindings/zsh/powerline.zsh -powerline/config_files/colors.json -powerline/config_files/config.json -powerline/config_files/colorschemes/ipython/default.json -powerline/config_files/colorschemes/shell/default.json -powerline/config_files/colorschemes/shell/solarized.json -powerline/config_files/colorschemes/tmux/default.json -powerline/config_files/colorschemes/vim/default.json -powerline/config_files/colorschemes/vim/solarized.json -powerline/config_files/colorschemes/wm/default.json -powerline/config_files/themes/ipython/in.json -powerline/config_files/themes/ipython/in2.json -powerline/config_files/themes/ipython/out.json -powerline/config_files/themes/ipython/rewrite.json -powerline/config_files/themes/shell/default.json 
-powerline/config_files/themes/shell/default_leftonly.json -powerline/config_files/themes/tmux/default.json -powerline/config_files/themes/vim/cmdwin.json -powerline/config_files/themes/vim/default.json -powerline/config_files/themes/vim/help.json -powerline/config_files/themes/vim/quickfix.json -powerline/config_files/themes/wm/default.json -powerline/lib/__init__.py -powerline/lib/config.py -powerline/lib/file_watcher.py -powerline/lib/humanize_bytes.py -powerline/lib/inotify.py -powerline/lib/memoize.py -powerline/lib/monotonic.py -powerline/lib/threaded.py -powerline/lib/tree_watcher.py -powerline/lib/url.py -powerline/lib/vcs/__init__.py -powerline/lib/vcs/bzr.py -powerline/lib/vcs/git.py -powerline/lib/vcs/mercurial.py -powerline/lint/__init__.py -powerline/lint/inspect.py -powerline/lint/markedjson/__init__.py -powerline/lint/markedjson/composer.py -powerline/lint/markedjson/constructor.py -powerline/lint/markedjson/error.py -powerline/lint/markedjson/events.py -powerline/lint/markedjson/loader.py -powerline/lint/markedjson/markedvalue.py -powerline/lint/markedjson/nodes.py -powerline/lint/markedjson/parser.py -powerline/lint/markedjson/reader.py -powerline/lint/markedjson/resolver.py -powerline/lint/markedjson/scanner.py -powerline/lint/markedjson/tokens.py -powerline/matchers/__init__.py -powerline/matchers/vim.py -powerline/renderers/__init__.py -powerline/renderers/bash_prompt.py -powerline/renderers/ipython.py -powerline/renderers/pango_markup.py -powerline/renderers/shell.py -powerline/renderers/tmux.py -powerline/renderers/vim.py -powerline/renderers/zsh_prompt.py -powerline/segments/__init__.py -powerline/segments/common.py -powerline/segments/ipython.py -powerline/segments/shell.py -powerline/segments/vim.py -scripts/powerline -scripts/powerline-lint \ No newline at end of file diff --git a/common/.local/lib/python2.7/site-packages/Powerline-beta.egg-info/dependency_links.txt b/common/.local/lib/python2.7/site-packages/Powerline-beta.egg-info/dependency_links.txt deleted file mode 100644 index 8b13789..0000000 --- a/common/.local/lib/python2.7/site-packages/Powerline-beta.egg-info/dependency_links.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/common/.local/lib/python2.7/site-packages/Powerline-beta.egg-info/installed-files.txt b/common/.local/lib/python2.7/site-packages/Powerline-beta.egg-info/installed-files.txt deleted file mode 100644 index ab11d49..0000000 --- a/common/.local/lib/python2.7/site-packages/Powerline-beta.egg-info/installed-files.txt +++ /dev/null @@ -1,159 +0,0 @@ -../powerline/shell.py -../powerline/colorscheme.py -../powerline/ipython.py -../powerline/theme.py -../powerline/vim.py -../powerline/__init__.py -../powerline/matcher.py -../powerline/renderer.py -../powerline/segment.py -../powerline/matchers/vim.py -../powerline/matchers/__init__.py -../powerline/bindings/__init__.py -../powerline/lint/__init__.py -../powerline/lint/inspect.py -../powerline/segments/shell.py -../powerline/segments/ipython.py -../powerline/segments/common.py -../powerline/segments/vim.py -../powerline/segments/__init__.py -../powerline/renderers/shell.py -../powerline/renderers/ipython.py -../powerline/renderers/bash_prompt.py -../powerline/renderers/zsh_prompt.py -../powerline/renderers/vim.py -../powerline/renderers/__init__.py -../powerline/renderers/pango_markup.py -../powerline/renderers/tmux.py -../powerline/lib/memoize.py -../powerline/lib/monotonic.py -../powerline/lib/config.py -../powerline/lib/humanize_bytes.py -../powerline/lib/__init__.py 
-../powerline/lib/inotify.py -../powerline/lib/url.py -../powerline/lib/threaded.py -../powerline/lib/file_watcher.py -../powerline/lib/tree_watcher.py -../powerline/bindings/ipython/post_0_11.py -../powerline/bindings/ipython/pre_0_11.py -../powerline/bindings/ipython/__init__.py -../powerline/bindings/qtile/__init__.py -../powerline/bindings/qtile/widget.py -../powerline/bindings/vim/__init__.py -../powerline/bindings/zsh/__init__.py -../powerline/lint/markedjson/composer.py -../powerline/lint/markedjson/constructor.py -../powerline/lint/markedjson/loader.py -../powerline/lint/markedjson/nodes.py -../powerline/lint/markedjson/resolver.py -../powerline/lint/markedjson/scanner.py -../powerline/lint/markedjson/reader.py -../powerline/lint/markedjson/__init__.py -../powerline/lint/markedjson/parser.py -../powerline/lint/markedjson/markedvalue.py -../powerline/lint/markedjson/events.py -../powerline/lint/markedjson/tokens.py -../powerline/lint/markedjson/error.py -../powerline/lib/vcs/git.py -../powerline/lib/vcs/mercurial.py -../powerline/lib/vcs/__init__.py -../powerline/lib/vcs/bzr.py -../powerline/config_files/colors.json -../powerline/config_files/config.json -../powerline/config_files/colorschemes/ipython/default.json -../powerline/config_files/colorschemes/shell/default.json -../powerline/config_files/colorschemes/shell/solarized.json -../powerline/config_files/colorschemes/tmux/default.json -../powerline/config_files/colorschemes/vim/default.json -../powerline/config_files/colorschemes/vim/solarized.json -../powerline/config_files/colorschemes/wm/default.json -../powerline/config_files/themes/ipython/in.json -../powerline/config_files/themes/ipython/in2.json -../powerline/config_files/themes/ipython/out.json -../powerline/config_files/themes/ipython/rewrite.json -../powerline/config_files/themes/shell/default.json -../powerline/config_files/themes/shell/default_leftonly.json -../powerline/config_files/themes/tmux/default.json -../powerline/config_files/themes/vim/cmdwin.json -../powerline/config_files/themes/vim/default.json -../powerline/config_files/themes/vim/help.json -../powerline/config_files/themes/vim/quickfix.json -../powerline/config_files/themes/wm/default.json -../powerline/bindings/awesome/powerline-awesome.py -../powerline/bindings/awesome/powerline.lua -../powerline/bindings/bash/powerline.sh -../powerline/bindings/tmux/powerline.conf -../powerline/bindings/vim/plugin/powerline.vim -../powerline/bindings/zsh/powerline.zsh -../powerline/shell.pyc -../powerline/colorscheme.pyc -../powerline/ipython.pyc -../powerline/theme.pyc -../powerline/vim.pyc -../powerline/__init__.pyc -../powerline/matcher.pyc -../powerline/renderer.pyc -../powerline/segment.pyc -../powerline/matchers/vim.pyc -../powerline/matchers/__init__.pyc -../powerline/bindings/__init__.pyc -../powerline/lint/__init__.pyc -../powerline/lint/inspect.pyc -../powerline/segments/shell.pyc -../powerline/segments/ipython.pyc -../powerline/segments/common.pyc -../powerline/segments/vim.pyc -../powerline/segments/__init__.pyc -../powerline/renderers/shell.pyc -../powerline/renderers/ipython.pyc -../powerline/renderers/bash_prompt.pyc -../powerline/renderers/zsh_prompt.pyc -../powerline/renderers/vim.pyc -../powerline/renderers/__init__.pyc -../powerline/renderers/pango_markup.pyc -../powerline/renderers/tmux.pyc -../powerline/lib/memoize.pyc -../powerline/lib/monotonic.pyc -../powerline/lib/config.pyc -../powerline/lib/humanize_bytes.pyc -../powerline/lib/__init__.pyc -../powerline/lib/inotify.pyc 
-../powerline/lib/url.pyc -../powerline/lib/threaded.pyc -../powerline/lib/file_watcher.pyc -../powerline/lib/tree_watcher.pyc -../powerline/bindings/ipython/post_0_11.pyc -../powerline/bindings/ipython/pre_0_11.pyc -../powerline/bindings/ipython/__init__.pyc -../powerline/bindings/qtile/__init__.pyc -../powerline/bindings/qtile/widget.pyc -../powerline/bindings/vim/__init__.pyc -../powerline/bindings/zsh/__init__.pyc -../powerline/lint/markedjson/composer.pyc -../powerline/lint/markedjson/constructor.pyc -../powerline/lint/markedjson/loader.pyc -../powerline/lint/markedjson/nodes.pyc -../powerline/lint/markedjson/resolver.pyc -../powerline/lint/markedjson/scanner.pyc -../powerline/lint/markedjson/reader.pyc -../powerline/lint/markedjson/__init__.pyc -../powerline/lint/markedjson/parser.pyc -../powerline/lint/markedjson/markedvalue.pyc -../powerline/lint/markedjson/events.pyc -../powerline/lint/markedjson/tokens.pyc -../powerline/lint/markedjson/error.pyc -../powerline/lib/vcs/git.pyc -../powerline/lib/vcs/mercurial.pyc -../powerline/lib/vcs/__init__.pyc -../powerline/lib/vcs/bzr.pyc -../powerline/bindings/awesome/powerline-awesome.pyc -./ -SOURCES.txt -PKG-INFO -not-zip-safe -dependency_links.txt -top_level.txt -requires.txt -../../../../bin/powerline -../../../../bin/powerline-lint diff --git a/common/.local/lib/python2.7/site-packages/Powerline-beta.egg-info/not-zip-safe b/common/.local/lib/python2.7/site-packages/Powerline-beta.egg-info/not-zip-safe deleted file mode 100644 index 8b13789..0000000 --- a/common/.local/lib/python2.7/site-packages/Powerline-beta.egg-info/not-zip-safe +++ /dev/null @@ -1 +0,0 @@ - diff --git a/common/.local/lib/python2.7/site-packages/Powerline-beta.egg-info/requires.txt b/common/.local/lib/python2.7/site-packages/Powerline-beta.egg-info/requires.txt deleted file mode 100644 index c4052b2..0000000 --- a/common/.local/lib/python2.7/site-packages/Powerline-beta.egg-info/requires.txt +++ /dev/null @@ -1,4 +0,0 @@ - - -[docs] -Sphinx \ No newline at end of file diff --git a/common/.local/lib/python2.7/site-packages/Powerline-beta.egg-info/top_level.txt b/common/.local/lib/python2.7/site-packages/Powerline-beta.egg-info/top_level.txt deleted file mode 100644 index f2ffc12..0000000 --- a/common/.local/lib/python2.7/site-packages/Powerline-beta.egg-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -powerline diff --git a/common/.local/lib/python2.7/site-packages/jedi-0.5b5.egg-info/PKG-INFO b/common/.local/lib/python2.7/site-packages/jedi-0.5b5.egg-info/PKG-INFO deleted file mode 100644 index 24a9e45..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi-0.5b5.egg-info/PKG-INFO +++ /dev/null @@ -1,282 +0,0 @@ -Metadata-Version: 1.1 -Name: jedi -Version: 0.5b5 -Summary: An autocompletion tool for Python that can be used for text editors. -Home-page: https://github.com/davidhalter/jedi -Author: David Halter -Author-email: davidhalter88@gmail.com -License: LGPLv3 -Description: ######################################## - Jedi - an awesome Python auto-completion - ######################################## - - .. image:: https://secure.travis-ci.org/davidhalter/jedi.png?branch=master - :target: http://travis-ci.org/davidhalter/jedi - :alt: Travis-CI build status - - **beta testing** - - *If you have any comments or feature requests, please tell me! I really want to - know, what you think about Jedi.* - - Jedi is an autocompletion tool for Python. It works. With and without syntax - errors. Sometimes it sucks, but that's normal in dynamic languages. 
But it - sucks less than other tools. It understands almost all of the basic Python - syntax elements including many builtins. - - Jedi suports two different goto functions and has support for renaming. - Probably it will also have some support for refactoring in the future. - - Jedi uses a very simple interface to connect with IDE's. As an reference, there - is a VIM implementation, which uses Jedi's autocompletion. However, I encourage - you to use Jedi in your IDEs. Start writing plugins! If there are problems with - licensing, just contact me. - - At the moment Jedi can be used as a - `VIM-Plugin `_. So, if you want to test - Jedi for now, you'll have to use VIM. But there are new plugins emerging: - - - `Emacs-Plugin `_ - - `Sublime-Plugin `_ **Under construction** - - Here are some pictures: - - .. image:: https://github.com/davidhalter/jedi/raw/master/screenshot_complete.png - - Completion for almost anything (Ctrl+Space). - - .. image:: https://github.com/davidhalter/jedi/raw/master/screenshot_function.png - - Display of function/class bodies, docstrings. - - .. image:: https://github.com/davidhalter/jedi/raw/master/screenshot_pydoc.png - - Pydoc support (with highlighting, Shift+k). - - There is also support for goto and renaming. - - Get the latest from `github `_. - - - Installation - ============ - - You can either include Jedi as a submodule in your text editor plugin (like - jedi-vim_ does it by default), or you - can install Jedi systemwide. - - The preferred way to install the Jedi library into your system is by using - pip_:: - - sudo pip install jedi - - If you want to install the current development version:: - - sudo pip install -e git://github.com/davidhalter/jedi.git#egg=jedi - - Note: This just installs the Jedi library, not the editor plugins. For - information about how to make it work with your editor, refer to the - corresponding documentation. - - - Support - ======= - - Jedi supports Python 2.5 up to 3.x. There is just one code base, for both - Python 2 and 3. - Jedi supports many of the widely used Python features: - - - builtin functions/classes support - - complex module / function / class structures - - ignores syntax and indentation errors - - multiple returns / yields - - tuple assignments / array indexing / dictionary indexing - - exceptions / with-statement - - \*args / \*\*kwargs - - decorators - - descriptors -> property / staticmethod / classmethod - - closures - - generators (yield statement) / iterators - - support for some magic methods: ``__call__``, ``__iter__``, ``__next__``, - ``__get__``, ``__getitem__``, ``__init__`` - - support for list.append, set.add, list.extend, etc. - - (nested) list comprehensions / ternary expressions - - relative imports - - ``getattr()`` / ``__getattr__`` / ``__getattribute__`` - - function annotations (py3k feature, are ignored right now, but being parsed. - I don't know what to do with them.) 
- - class decorators (py3k feature, are being ignored too, until I find a use - case, that doesn't work with Jedi) - - simple/usual ``sys.path`` modifications - - ``isinstance`` checks for if/while/assert - - virtualenv support - - infer function arguments with sphinx (and other) docstrings - - However, it does not yet support (and probably will in future versions, because - they are on my todo list): - - - manipulations of instances outside the instance variables, without using - functions - - It does not support (and most probably will not in future versions): - - - metaclasses (how could an auto-completion ever support this) - - ``setattr()``, ``__import__()`` - - Writing to some dicts: ``globals()``, ``locals()``, ``object.__dict__`` - - evaluate ``if`` / ``while`` - - - Caveats - ======= - - This framework should work for both Python 2/3. However, some things were just - not as *pythonic* in Python 2 as things should be. To keep things simple, some - things have been held back: - - - Classes: Always Python 3 like, therefore all classes inherit from ``object``. - - Generators: No ``next`` method. The ``__next__`` method is used instead. - - Exceptions are only looked at in the form of ``Exception as e``, no comma! - - Syntax errors and other strange stuff, that is defined differently in the - Python language, may lead to undefined behaviour of the completion. Jedi is - **NOT** a Python compiler, that tries to correct you. It is a tool that wants - to help you. But **YOU** have to know Python, not Jedi. - - Importing ``numpy`` can be quite slow sometimes, as well as loading the builtins - the first time. If you want to speed it up, you could write import hooks in - jedi, which preloads this stuff. However, once loaded, this is not a problem - anymore. The same is true for huge modules like ``PySide``, ``wx``, etc. - - Security is an important issue for Jedi. Therefore no Python code is executed. - As long as you write pure python, everything is evaluated statically. But: If - you use builtin modules (`c_builtin`) there is no other option than to execute - those modules. However: Execute isn't that critical (as e.g. in pythoncomplete, - which used to execute *every* import!), because it means one import and no - more. So basically the only dangerous thing is using the import itself. If your - `c_builtin` uses some strange initializations, it might be dangerous. But if it - does you're screwed anyways, because eventualy you're going to execute your - code, which executes the import. - - - A little history - ================ - - The Star Wars Jedi are awesome. My Jedi software tries to imitate a little bit - of the precognition the Jedi have. There is even an awesome `scene - `_ of Monty Python Jedi's :-). - - But actually the name hasn't so much to do with Star Wars. It's part of my - second name. - - After I explained Guido van Rossum, how some parts of my auto-completion work, - he said (we drank a beer or two): - - *Oh, that worries me* - - When it's finished, I hope he'll like it :-) - - I actually started Jedi, because there were no good solutions available for - VIM. Most auto-completions just didn't work well. The only good solution was - PyCharm. I just like my good old VIM. Rope was never really intended to be an - auto-completion (and also I really hate project folders for my Python scripts). - It's more of a refactoring suite. So I decided to do my own version of a - completion, which would execute non-dangerous code. But I soon realized, that - this wouldn't work. 
So I built an extremely recursive thing which understands - many of Python's key features. - - By the way, I really tried to program it as understandable as possible. But I - think understanding it might need quite some time, because of its recursive - nature. - - - API-Design for IDEs - =================== - - If you want to set up an IDE with Jedi, you need to ``import jedi``. You should - have the following objects available: - - :: - - Script(source, line, column, source_path) - - ``source`` would be the source of your python file/script, separated by new - lines. ``line`` is the current line you want to perform actions on (starting - with line #1 as the first line). ``column`` represents the current - column/indent of the cursor (starting with zero). ``source_path`` should be the - path of your file in the file system. - - It returns a script object that contains the relevant information for the other - functions to work without params. - - :: - - Script().complete - - Returns ``api.Completion`` objects. Those objects have got - informations about the completions. More than just names. - - :: - - Script().goto - - Similar to complete. The returned ``api.Definition`` objects contain - information about the definitions found. - - :: - - Script().get_definition - - Mostly used for tests. Like goto, but follows statements and imports and - doesn't break there. You probably don't want to use this function. It's - mostly for testing. - - :: - - Script().related_names - - Returns all names that point to the definition of the name under the - cursor. This is also very useful for refactoring (renaming). - - :: - - Script().get_in_function_call - - Get the ``Function`` object of the call you're currently in, e.g.: ``abs(`` - with the cursor at the end would return the builtin ``abs`` function. - - :: - - NotFoundError - - If you use the goto function and no valid identifier (name) is at the - place of the cursor (position). It will raise this exception. - - :: - - set_debug_function - - Sets a callback function for ``debug.py``. This function is called with - multiple text objects, in python 3 you could insert ``print``. - - :: - - settings - - Access to the ``settings.py`` module. The settings are described there. - - - - .. _jedi-vim: http://github.com/davidhalter/jedi-vim - .. 
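
The API walkthrough above corresponds to the doctest in the deleted jedi/api.py. A short sketch of driving that 0.5b5-era interface — the file name 'example.py' and the cursor position are illustrative, and later jedi releases renamed several of these calls::

    import jedi

    source = "import json; json.l"
    # line 1, column 19 places the cursor just after "json.l"
    script = jedi.Script(source, 1, 19, 'example.py')

    for c in script.complete():
        # each Completion carries the whole word and the missing suffix
        print(c.word, c.complete)

    # get_definition() follows imports and statements to the end;
    # goto() would stop at the first definition instead.
    for d in script.get_definition():
        print(d.module_path, d.line_nr, d.description)
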
_pip: http://www.pip-installer.org/ - -Keywords: python completion refactoring vim -Platform: any -Classifier: Development Status :: 4 - Beta -Classifier: Environment :: Plugins -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL) -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Topic :: Text Editors :: Integrated Development Environments (IDE) -Classifier: Topic :: Utilities diff --git a/common/.local/lib/python2.7/site-packages/jedi-0.5b5.egg-info/SOURCES.txt b/common/.local/lib/python2.7/site-packages/jedi-0.5b5.egg-info/SOURCES.txt deleted file mode 100644 index c1d625b..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi-0.5b5.egg-info/SOURCES.txt +++ /dev/null @@ -1,31 +0,0 @@ -AUTHORS.txt -LICENSE.txt -MANIFEST.in -README.rst -setup.cfg -setup.py -jedi/__init__.py -jedi/_compatibility.py -jedi/api.py -jedi/api_classes.py -jedi/builtin.py -jedi/debug.py -jedi/docstrings.py -jedi/dynamic.py -jedi/evaluate.py -jedi/helpers.py -jedi/imports.py -jedi/keywords.py -jedi/modules.py -jedi/parsing.py -jedi/settings.py -jedi.egg-info/PKG-INFO -jedi.egg-info/SOURCES.txt -jedi.egg-info/dependency_links.txt -jedi.egg-info/top_level.txt -jedi/mixin/_functools.pym -jedi/mixin/_sre.pym -jedi/mixin/_weakref.pym -jedi/mixin/builtins.pym -jedi/mixin/datetime.pym -jedi/mixin/posix.pym \ No newline at end of file diff --git a/common/.local/lib/python2.7/site-packages/jedi-0.5b5.egg-info/dependency_links.txt b/common/.local/lib/python2.7/site-packages/jedi-0.5b5.egg-info/dependency_links.txt deleted file mode 100644 index 8b13789..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi-0.5b5.egg-info/dependency_links.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/common/.local/lib/python2.7/site-packages/jedi-0.5b5.egg-info/installed-files.txt b/common/.local/lib/python2.7/site-packages/jedi-0.5b5.egg-info/installed-files.txt deleted file mode 100644 index d4fc8a0..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi-0.5b5.egg-info/installed-files.txt +++ /dev/null @@ -1,41 +0,0 @@ -../jedi/builtin.py -../jedi/evaluate.py -../jedi/debug.py -../jedi/helpers.py -../jedi/settings.py -../jedi/keywords.py -../jedi/api_classes.py -../jedi/api.py -../jedi/modules.py -../jedi/__init__.py -../jedi/parsing.py -../jedi/docstrings.py -../jedi/_compatibility.py -../jedi/imports.py -../jedi/dynamic.py -../jedi/mixin/builtins.pym -../jedi/mixin/posix.pym -../jedi/mixin/_functools.pym -../jedi/mixin/_weakref.pym -../jedi/mixin/_sre.pym -../jedi/mixin/datetime.pym -../jedi/builtin.pyc -../jedi/evaluate.pyc -../jedi/debug.pyc -../jedi/helpers.pyc -../jedi/settings.pyc -../jedi/keywords.pyc -../jedi/api_classes.pyc -../jedi/api.pyc -../jedi/modules.pyc -../jedi/__init__.pyc -../jedi/parsing.pyc -../jedi/docstrings.pyc -../jedi/_compatibility.pyc -../jedi/imports.pyc -../jedi/dynamic.pyc -./ -SOURCES.txt -PKG-INFO -dependency_links.txt -top_level.txt diff --git a/common/.local/lib/python2.7/site-packages/jedi-0.5b5.egg-info/top_level.txt b/common/.local/lib/python2.7/site-packages/jedi-0.5b5.egg-info/top_level.txt deleted file mode 100644 index 86c1cb1..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi-0.5b5.egg-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -jedi diff --git a/common/.local/lib/python2.7/site-packages/jedi/__init__.py 
b/common/.local/lib/python2.7/site-packages/jedi/__init__.py deleted file mode 100644 index 9f8c369..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -import sys - -# python imports are hell sometimes. Especially the combination of relative -# imports and circular imports... Just avoid it: -sys.path.insert(0, __path__[0]) - -from .api import Script, NotFoundError, set_debug_function -from . import settings - -from . import api - -__doc__ = api.__doc__ - -del api - -sys.path.pop(0) diff --git a/common/.local/lib/python2.7/site-packages/jedi/_compatibility.py b/common/.local/lib/python2.7/site-packages/jedi/_compatibility.py deleted file mode 100644 index d701526..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi/_compatibility.py +++ /dev/null @@ -1,169 +0,0 @@ -""" -This is a compatibility module, to make it possible to use jedi also with older -python versions. -""" -import sys - -is_py3k = sys.hexversion >= 0x03000000 - -is_py25 = sys.hexversion < 0x02060000 - -# next was defined in python 2.6, in python 3 obj.next won't be possible -# anymore -try: - next = next -except NameError: - _raiseStopIteration = object() - - def next(iterator, default=_raiseStopIteration): - if not hasattr(iterator, 'next'): - raise TypeError("not an iterator") - try: - return iterator.next() - except StopIteration: - if default is _raiseStopIteration: - raise - else: - return default - -# ast module was defined in python 2.6 -try: - from ast import literal_eval -except ImportError: - literal_eval = eval - - -# properties in 2.5 -try: - property.setter -except AttributeError: - class property(property): - def __init__(self, fget, *args, **kwargs): - self.__doc__ = fget.__doc__ - super(property, self).__init__(fget, *args, **kwargs) - - def setter(self, fset): - cls_ns = sys._getframe(1).f_locals - for k, v in cls_ns.iteritems(): - if v == self: - propname = k - break - cls_ns[propname] = property(self.fget, fset, - self.fdel, self.__doc__) - return cls_ns[propname] -else: - property = property - -# unicode function -try: - unicode = unicode -except NameError: - unicode = str - -if is_py3k: - utf8 = lambda s: s -else: - utf8 = lambda s: s.decode('utf-8') - -utf8.__doc__ = """ -Decode a raw string into unicode object. Do nothing in Python 3. -""" - -# exec function -if is_py3k: - def exec_function(source, global_map): - exec(source, global_map) -else: - eval(compile("""def exec_function(source, global_map): - exec source in global_map """, 'blub', 'exec')) - -# StringIO (Python 2.5 has no io module), so use io only for py3k -try: - from StringIO import StringIO -except ImportError: - from io import StringIO - -# hasattr function used because python -if is_py3k: - hasattr = hasattr -else: - def hasattr(obj, name): - try: - getattr(obj, name) - return True - except AttributeError: - return False - - -class Python3Method(object): - def __init__(self, func): - self.func = func - - def __get__(self, obj, objtype): - if obj is None: - return lambda *args, **kwargs: self.func(*args, **kwargs) - else: - return lambda *args, **kwargs: self.func(obj, *args, **kwargs) - -try: - # the python3 way - from functools import reduce -except ImportError: - reduce = reduce - - -def use_metaclass(meta, *bases): - """ Create a class with a metaclass. 
""" - if not bases: - bases = (object,) - return meta("HackClass", bases, {}) - -try: - from inspect import cleandoc -except ImportError: - # python 2.5 doesn't have this method - import string - - def cleandoc(doc): - """Clean up indentation from docstrings. - - Any whitespace that can be uniformly removed from the second line - onwards is removed.""" - try: - lines = string.split(string.expandtabs(doc), '\n') - except UnicodeError: - return None - else: - # Find minimum indentation of any non-blank lines after first line. - margin = sys.maxint - for line in lines[1:]: - content = len(string.lstrip(line)) - if content: - indent = len(line) - content - margin = min(margin, indent) - # Remove indentation. - if lines: - lines[0] = lines[0].lstrip() - if margin < sys.maxint: - for i in range(1, len(lines)): - lines[i] = lines[i][margin:] - # Remove any trailing or leading blank lines. - while lines and not lines[-1]: - lines.pop() - while lines and not lines[0]: - lines.pop(0) - return string.join(lines, '\n') - -if is_py25: - # adds the `itertools.chain.from_iterable` constructor - import itertools - - class chain(itertools.chain): - @staticmethod - def from_iterable(iterables): - # chain.from_iterable(['ABC', 'DEF']) --> A B C D E F - for it in iterables: - for element in it: - yield element - itertools.chain = chain - del chain diff --git a/common/.local/lib/python2.7/site-packages/jedi/api.py b/common/.local/lib/python2.7/site-packages/jedi/api.py deleted file mode 100644 index 248991b..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi/api.py +++ /dev/null @@ -1,457 +0,0 @@ -""" -Jedi is an autocompletion library for Python. It offers additonal -services such as goto / get_definition / pydoc support / -get_in_function_call / related names. - -To give you a simple exmple how you can use the jedi library, -here is an exmple for the autocompletion feature: - ->>> import jedi ->>> source = '''import json; json.l''' ->>> script = jedi.Script(source, 1, 19, '') ->>> script - ->>> completions = script.complete() ->>> completions -[, ] ->>> completions[0].complete -'oad' ->>> completions[0].word -'load' - -As you see Jedi is pretty simple and allows you to concentrate -writing a good text editor, while still having very good IDE features -for Python. -""" -from __future__ import with_statement -__all__ = ['Script', 'NotFoundError', 'set_debug_function'] - -import re - -import parsing -import dynamic -import imports -import evaluate -import modules -import debug -import settings -import keywords -import helpers -import builtin -import api_classes - -from _compatibility import next, unicode - - -class NotFoundError(Exception): - """ A custom error to avoid catching the wrong exceptions """ - pass - - -class Script(object): - """ - A Script is the base for a completion, goto or whatever call. - - :param source: The source code of the current file - :type source: string - :param line: The line to complete in. - :type line: int - :param col: The column to complete in. - :type col: int - :param source_path: The path in the os, the current module is in. - :type source_path: string or None - :param source_encoding: encoding for decoding `source`, when it - is not a `unicode` object. - :type source_encoding: string - """ - def __init__(self, source, line, column, source_path, - source_encoding='utf-8'): - debug.reset_time() - try: - source = unicode(source, source_encoding, 'replace') - # Use 'replace' over 'ignore' to hold code structure. 
- except TypeError: # `source` is already a unicode object - pass - self.pos = line, column - self.module = modules.ModuleWithCursor(source_path, source=source, - position=self.pos) - self.source_path = source_path - debug.speed('init') - - @property - def parser(self): - """ The lazy parser """ - return self.module.parser - - def complete(self): - """ - An auto completer for python files. - - :return: list of Completion objects, sorted by name and __ comes last. - :rtype: list - """ - def follow_imports_if_possible(name): - # TODO remove this, or move to another place (not used) - par = name.parent - if isinstance(par, parsing.Import) and not \ - isinstance(self.parser.user_stmt, parsing.Import): - new = imports.ImportPath(par).follow(is_goto=True) - # Only remove the old entry if a new one has been found. - #print par, new, par.parent - if new: - try: - return new - except AttributeError: # .name undefined - pass - return [name] - - - debug.speed('complete start') - path = self.module.get_path_until_cursor() - path, dot, like = self._get_completion_parts(path) - - try: - scopes = list(self._prepare_goto(path, True)) - except NotFoundError: - scopes = [] - scope_generator = evaluate.get_names_for_scope( - self.parser.user_scope, self.pos) - completions = [] - for scope, name_list in scope_generator: - for c in name_list: - completions.append((c, scope)) - else: - completions = [] - debug.dbg('possible scopes', scopes) - for s in scopes: - if s.isinstance(evaluate.Function): - names = s.get_magic_method_names() - else: - if isinstance(s, imports.ImportPath): - if like == 'import': - l = self.module.get_line(self.pos[0])[:self.pos[1]] - if not l.endswith('import import'): - continue - names = s.get_defined_names(on_import_stmt=True) - else: - names = s.get_defined_names() - - for c in names: - completions.append((c, s)) - - if not dot: # named_params have no dots - call_def = self.get_in_function_call() - if call_def: - if not call_def.module.is_builtin(): - for p in call_def.params: - completions.append((p.get_name(), p)) - - # Do the completion if there is no path before and no import stmt. - if (not scopes or not isinstance(scopes[0], imports.ImportPath)) \ - and not path: - # add keywords - bs = builtin.Builtin.scope - completions += ((k, bs) for k in keywords.get_keywords( - all=True)) - - needs_dot = not dot and path - - comps = [] - for c, s in set(completions): - n = c.names[-1] - if settings.case_insensitive_completion \ - and n.lower().startswith(like.lower()) \ - or n.startswith(like): - if not evaluate.filter_private_variable(s, - self.parser.user_stmt, n): - new = api_classes.Completion(c, needs_dot, - len(like), s) - comps.append(new) - - debug.speed('complete end') - - return sorted(comps, key=lambda x: (x.word.startswith('__'), - x.word.startswith('_'), - x.word.lower())) - - def _prepare_goto(self, goto_path, is_like_search=False): - """ Base for complete, goto and get_definition. Basically it returns - the resolved scopes under cursor. """ - debug.dbg('start: %s in %s' % (goto_path, self.parser.scope)) - - user_stmt = self.parser.user_stmt - debug.speed('parsed') - if not user_stmt and len(goto_path.split('\n')) > 1: - # If the user_stmt is not defined and the goto_path is multi line, - # something's strange. Most probably the backwards tokenizer - # matched to much. 
- return [] - - if isinstance(user_stmt, parsing.Import): - scopes = [self._get_on_import_stmt(is_like_search)[0]] - else: - # just parse one statement, take it and evaluate it - stmt = self._get_under_cursor_stmt(goto_path) - scopes = evaluate.follow_statement(stmt) - return scopes - - def _get_under_cursor_stmt(self, cursor_txt): - r = parsing.PyFuzzyParser(cursor_txt, self.source_path, no_docstr=True) - try: - stmt = r.module.statements[0] - except IndexError: - raise NotFoundError() - stmt.start_pos = self.pos - stmt.parent = self.parser.user_scope - return stmt - - def get_definition(self): - """ - Returns the definitions of a the path under the cursor. This is - not a goto function! This follows complicated paths and returns the - end, not the first definition. - The big difference of goto and get_definition is that goto doesn't - follow imports and statements. - Multiple objects may be returned, because Python itself is a dynamic - language, which means depending on an option you can have two different - versions of a function. - - :return: list of Definition objects, which are basically scopes. - :rtype: list - """ - def resolve_import_paths(scopes): - for s in scopes.copy(): - if isinstance(s, imports.ImportPath): - scopes.remove(s) - scopes.update(resolve_import_paths(set(s.follow()))) - return scopes - - goto_path = self.module.get_path_under_cursor() - - context = self.module.get_context() - if next(context) in ('class', 'def'): - scopes = set([self.module.parser.user_scope]) - elif not goto_path: - op = self.module.get_operator_under_cursor() - scopes = set([keywords.get_operator(op, self.pos)] if op else []) - else: - scopes = set(self._prepare_goto(goto_path)) - - scopes = resolve_import_paths(scopes) - - # add keywords - scopes |= keywords.get_keywords(string=goto_path, pos=self.pos) - - d = set([api_classes.Definition(s) for s in scopes - if not isinstance(s, imports.ImportPath._GlobalNamespace)]) - return sorted(d, key=lambda x: (x.module_path, x.start_pos)) - - def goto(self): - """ - Returns the first definition found by goto. This means: It doesn't - follow imports and statements. - Multiple objects may be returned, because Python itself is a dynamic - language, which means depending on an option you can have two different - versions of a function. - - :return: list of Definition objects, which are basically scopes. - """ - d = [api_classes.Definition(d) for d in set(self._goto()[0])] - return sorted(d, key=lambda x: (x.module_path, x.start_pos)) - - def _goto(self, add_import_name=False): - """ - Used for goto and related_names. - :param add_import_name: TODO add description - """ - def follow_inexistent_imports(defs): - """ Imports can be generated, e.g. following - `multiprocessing.dummy` generates an import dummy in the - multiprocessing module. The Import doesn't exist -> follow. 
- """ - definitions = set(defs) - for d in defs: - if isinstance(d.parent, parsing.Import) \ - and d.start_pos == (0, 0): - i = imports.ImportPath(d.parent).follow(is_goto=True) - definitions.remove(d) - definitions |= follow_inexistent_imports(i) - return definitions - - goto_path = self.module.get_path_under_cursor() - context = self.module.get_context() - if next(context) in ('class', 'def'): - user_scope = self.parser.user_scope - definitions = set([user_scope.name]) - search_name = str(user_scope.name) - elif isinstance(self.parser.user_stmt, parsing.Import): - s, name_part = self._get_on_import_stmt() - try: - definitions = [s.follow(is_goto=True)[0]] - except IndexError: - definitions = [] - search_name = str(name_part) - - if add_import_name: - import_name = self.parser.user_stmt.get_defined_names() - # imports have only one name - if name_part == import_name[0].names[-1]: - definitions.append(import_name[0]) - else: - stmt = self._get_under_cursor_stmt(goto_path) - defs, search_name = evaluate.goto(stmt) - definitions = follow_inexistent_imports(defs) - return definitions, search_name - - def related_names(self, additional_module_paths=[]): - """ - Returns `dynamic.RelatedName` objects, which contain all names, that - are defined by the same variable, function, class or import. - This function can be used either to show all the usages of a variable - or for renaming purposes. - - TODO implement additional_module_paths - """ - user_stmt = self.parser.user_stmt - definitions, search_name = self._goto(add_import_name=True) - if isinstance(user_stmt, parsing.Statement) \ - and self.pos < user_stmt.get_assignment_calls().start_pos: - # the search_name might be before `=` - definitions = [v for v in user_stmt.set_vars - if str(v) == search_name] - if not isinstance(user_stmt, parsing.Import): - # import case is looked at with add_import_name option - definitions = dynamic.related_name_add_import_modules(definitions, - search_name) - - module = set([d.get_parent_until() for d in definitions]) - module.add(self.parser.module) - names = dynamic.related_names(definitions, search_name, module) - - for d in set(definitions): - if isinstance(d, parsing.Module): - names.append(api_classes.RelatedName(d, d)) - else: - names.append(api_classes.RelatedName(d.names[0], d)) - - return sorted(set(names), key=lambda x: (x.module_path, x.start_pos), - reverse=True) - - def get_in_function_call(self): - """ - Return the function, that the cursor is in, e.g.: - >>> isinstance(| # | <-- cursor is here - - This would return the `isinstance` function. In contrary: - >>> isinstance()| # | <-- cursor is here - - This would return `None`. - """ - def check_user_stmt(user_stmt): - if user_stmt is None \ - or not isinstance(user_stmt, parsing.Statement): - return None, 0 - ass = helpers.fast_parent_copy(user_stmt.get_assignment_calls()) - - call, index, stop = helpers.scan_array_for_pos(ass, self.pos) - return call, index - - def check_cache(): - """ Do the parsing with a part parser, therefore reduce ressource - costs. - TODO this is not working with multi-line docstrings, improve. 
- """ - if self.source_path is None: - return None, 0 - - try: - timestamp, parser = builtin.CachedModule.cache[ - self.source_path] - except KeyError: - return None, 0 - part_parser = self.module.get_part_parser() - user_stmt = part_parser.user_stmt - call, index = check_user_stmt(user_stmt) - if call: - old_stmt = parser.module.get_statement_for_position(self.pos) - if old_stmt is None: - return None, 0 - old_call, old_index = check_user_stmt(old_stmt) - if old_call: - # compare repr because that should definitely be the same. - # Otherwise the whole thing is out of sync. - if repr(old_call) == repr(call): - # return the index of the part_parser - return old_call, index - return None, 0 - else: - raise NotFoundError() - - debug.speed('func_call start') - try: - call, index = check_cache() - except NotFoundError: - return None - debug.speed('func_call parsed') - - if call is None: - # This is a backup, if the above is not successful. - user_stmt = self.parser.user_stmt - call, index = check_user_stmt(user_stmt) - if call is None: - return None - - debug.speed('func_call user_stmt') - with helpers.scale_speed_settings(settings.scale_get_in_function_call): - origins = evaluate.follow_call(call) - debug.speed('func_call followed') - - if len(origins) == 0: - return None - # just take entry zero, because we need just one. - executable = origins[0] - - return api_classes.CallDef(executable, index, call) - - def _get_on_import_stmt(self, is_like_search=False): - """ Resolve the user statement, if it is an import. Only resolve the - parts until the user position. """ - user_stmt = self.parser.user_stmt - import_names = user_stmt.get_all_import_names() - kill_count = -1 - cur_name_part = None - for i in import_names: - if user_stmt.alias == i: - continue - for name_part in i.names: - if name_part.end_pos >= self.pos: - if not cur_name_part: - cur_name_part = name_part - kill_count += 1 - - i = imports.ImportPath(user_stmt, is_like_search, - kill_count=kill_count, direct_resolve=True) - return i, cur_name_part - - def _get_completion_parts(self, path): - """ - Returns the parts for the completion - :return: tuple - (path, dot, like) - """ - match = re.match(r'^(.*?)(\.|)(\w?[\w\d]*)$', path, flags=re.S) - return match.groups() - - def __del__(self): - evaluate.clear_caches() - - -def set_debug_function(func_cb=debug.print_to_stdout, warnings=True, - notices=True, speed=True): - """ - You can define a callback debug function to get all the debug messages. - :param func_cb: The callback function for debug messages, with n params. 
- """ - debug.debug_function = func_cb - debug.enable_warning = warnings - debug.enable_notice = notices - debug.enable_speed = speed diff --git a/common/.local/lib/python2.7/site-packages/jedi/api_classes.py b/common/.local/lib/python2.7/site-packages/jedi/api_classes.py deleted file mode 100644 index 367615a..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi/api_classes.py +++ /dev/null @@ -1,311 +0,0 @@ -""" The classes returned by the api """ - -import re -import os - -import settings -import evaluate -import imports -import parsing -import keywords - - -class BaseDefinition(object): - _mapping = {'posixpath': 'os.path', - 'riscospath': 'os.path', - 'ntpath': 'os.path', - 'os2emxpath': 'os.path', - 'macpath': 'os.path', - 'genericpath': 'os.path', - '_io': 'io', - '__builtin__': '', - 'builtins': '', - } - - _tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in { - 'argparse._ActionsContainer': 'argparse.ArgumentParser', - '_sre.SRE_Match': 're.MatchObject', - '_sre.SRE_Pattern': 're.RegexObject', - }.items()) - - def __init__(self, definition, start_pos): - self.start_pos = start_pos - self.definition = definition - self.is_keyword = isinstance(definition, keywords.Keyword) - - # generate a path to the definition - self.module_path = str(definition.get_parent_until().path) - - @property - def type(self): - # generate the type - stripped = self.definition - if isinstance(self.definition, evaluate.InstanceElement): - stripped = self.definition.var - return type(stripped).__name__ - - @property - def path(self): - path = [] - if not isinstance(self.definition, keywords.Keyword): - par = self.definition - while par is not None: - try: - path.insert(0, par.name) - except AttributeError: - pass - par = par.parent - return path - - @property - def module_name(self): - path = self.module_path - sep = os.path.sep - p = re.sub(r'^.*?([\w\d]+)(%s__init__)?.py$' % sep, r'\1', path) - return p - - def in_builtin_module(self): - return not self.module_path.endswith('.py') - - @property - def line_nr(self): - return self.start_pos[0] - - @property - def column(self): - return self.start_pos[1] - - @property - def doc(self): - """ Return a document string for this completion object. """ - try: - return self.definition.doc - except AttributeError: - return self.raw_doc - - @property - def raw_doc(self): - """ Returns the raw docstring `__doc__` for any object """ - try: - return str(self.definition.docstr) - except AttributeError: - return '' - - @property - def description(self): - return str(self.definition) - - @property - def full_name(self): - """ - Returns the path to a certain class/function, see #61. - """ - path = [str(p) for p in self.path] - # TODO add further checks, the mapping should only occur on stdlib. - try: - path[0] = self._mapping[path[0]] - except KeyError: - pass - for key, repl in self._tuple_mapping.items(): - if tuple(path[:len(key)]) == key: - path = [repl] + path[len(key):] - - return '.'.join(path if path[0] else path[1:]) - - def __repr__(self): - return "<%s %s>" % (type(self).__name__, self.description) - - -class Completion(BaseDefinition): - """ `Completion` objects are returned from `Script.complete`. Providing - some useful functions for IDE's. 
""" - def __init__(self, name, needs_dot, like_name_length, base): - super(Completion, self).__init__(name.parent, name.start_pos) - - self.name = name - self.needs_dot = needs_dot - self.like_name_length = like_name_length - self.base = base - - self._followed_definitions = None - - @property - def complete(self): - """ Delievers the rest of the word, e.g. completing `isinstance` - >>> isinstan - - would return the string 'ce'. It also adds additional stuff, depending - on your `settings.py` - """ - dot = '.' if self.needs_dot else '' - append = '' - if settings.add_bracket_after_function \ - and self.type == 'Function': - append = '(' - - if settings.add_dot_after_module: - if isinstance(self.base, parsing.Module): - append += '.' - if isinstance(self.base, parsing.Param): - append += '=' - return dot + self.name.names[-1][self.like_name_length:] + append - - @property - def word(self): - """ In contrary to `complete` returns the whole word, e.g. - >>> isinstan - - would return 'isinstance'. - """ - return str(self.name.names[-1]) - - @property - def description(self): - """ Provides a description of the completion object - TODO return value is just __repr__ of some objects, improve! """ - parent = self.name.parent - if parent is None: - return '' - t = self.type - if t == 'Statement' or t == 'Import': - desc = self.definition.get_code(False) - else: - desc = '.'.join(str(p) for p in self.path) - - line_nr = '' if self.in_builtin_module else '@%s' % self.line_nr - return '%s: %s%s' % (t, desc, line_nr) - - def follow_definition(self): - """ Returns you the original definitions. I strongly recommend not - using it for your completions, because it might slow down Jedi. If you - want to read only a few objects (<=20). I think it might be useful, - especially to get the original docstrings. - The basic problem of this function is that it follows all results. This - means with 1000 completions (e.g. numpy), it's just PITA slow. - """ - if self._followed_definitions is None: - if self.definition.isinstance(parsing.Statement): - defs = evaluate.follow_statement(self.definition) - elif self.definition.isinstance(parsing.Import): - defs = imports.strip_imports([self.definition]) - else: - return [self] - - self._followed_definitions = \ - [BaseDefinition(d, d.start_pos) for d in defs] - evaluate.clear_caches() - - return self._followed_definitions - - def __repr__(self): - return '<%s: %s>' % (type(self).__name__, self.name) - - -class Definition(BaseDefinition): - """ These are the objects returned by either `Script.goto` or - `Script.get_definition`. """ - def __init__(self, definition): - super(Definition, self).__init__(definition, definition.start_pos) - - @property - def description(self): - """ A description of the Definition object, which is heavily used in - testing. e.g. 
for `isinstance` it returns 'def isinstance' """ - d = self.definition - if isinstance(d, evaluate.InstanceElement): - d = d.var - if isinstance(d, evaluate.parsing.Name): - d = d.parent - - if isinstance(d, evaluate.Array): - d = 'class ' + d.type - elif isinstance(d, (parsing.Class, evaluate.Class, evaluate.Instance)): - d = 'class ' + str(d.name) - elif isinstance(d, (evaluate.Function, evaluate.parsing.Function)): - d = 'def ' + str(d.name) - elif isinstance(d, evaluate.parsing.Module): - # only show module name - d = 'module %s' % self.module_name - elif self.is_keyword: - d = 'keyword %s' % d.name - else: - d = d.get_code().replace('\n', '') - return d - - @property - def desc_with_module(self): - """ In addition to the Definition, it also returns the module. Don't - use it yet, its behaviour may change. If you really need it, talk to me - TODO add full path. This function is should return a - module.class.function path. """ - if self.module_path.endswith('.py') \ - and not isinstance(self.definition, parsing.Module): - position = '@%s' % (self.line_nr) - else: - # is a builtin or module - position = '' - return "%s:%s%s" % (self.module_name, self.description, position) - - -class RelatedName(BaseDefinition): - def __init__(self, name_part, scope): - super(RelatedName, self).__init__(scope, name_part.start_pos) - self.name_part = name_part - self.text = str(name_part) - self.end_pos = name_part.end_pos - - @property - def description(self): - return "%s@%s,%s" % (self.text, self.start_pos[0], self.start_pos[1]) - - def __eq__(self, other): - return self.start_pos == other.start_pos \ - and self.module_path == other.module_path - - def __hash__(self): - return hash((self.start_pos, self.module_path)) - - -class CallDef(object): - """ `CallDef` objects is the return value of `Script.get_in_function_call`. - It knows what functions you are currently in. e.g. `isinstance(` would - return the `isinstance` function. without `(` it would return nothing.""" - def __init__(self, executable, index, call): - self.executable = executable - self.index = index - self.call = call - - @property - def params(self): - if self.executable.isinstance(evaluate.Function): - if isinstance(self.executable, evaluate.InstanceElement): - return self.executable.params[1:] - return self.executable.params - else: - try: - sub = self.executable.get_subscope_by_name('__init__') - return sub.params[1:] # ignore self - except KeyError: - return [] - - @property - def bracket_start(self): - """ The indent of the bracket that is responsible for the last function - call. """ - c = self.call - while c.next is not None: - c = c.next - return c.name.end_pos - - @property - def call_name(self): - """ The name (e.g. 'isinstance') as a string. 
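# A sketch (not part of the diff) of how the CallDef above is consumed through
# the vendored API of this jedi copy: Script.get_in_function_call() returns a
# CallDef whose call_name/index/bracket_start are read here. Newer jedi
# releases renamed this API, so treat these names as belonging to this old copy.
import jedi

source = "isinstance(42, "
call = jedi.Script(source, 1, len(source), 'example.py').get_in_function_call()
if call is not None:
    print(call.call_name)      # 'isinstance'
    print(call.index)          # index of the argument under the cursor
    print(call.bracket_start)  # position of the opening bracket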
""" - return str(self.executable.name) - - @property - def module(self): - return self.executable.get_parent_until() - - def __repr__(self): - return '<%s: %s index %s>' % (type(self).__name__, self.executable, - self.index) diff --git a/common/.local/lib/python2.7/site-packages/jedi/builtin.py b/common/.local/lib/python2.7/site-packages/jedi/builtin.py deleted file mode 100644 index da4fc56..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi/builtin.py +++ /dev/null @@ -1,472 +0,0 @@ -from __future__ import with_statement -from _compatibility import exec_function, is_py3k - -import re -import sys -import os -if is_py3k: - import io -import types -import inspect - -import debug -import parsing -import imports -import evaluate - - -def get_sys_path(): - def check_virtual_env(sys_path): - """ Add virtualenv's site-packages to the `sys.path`.""" - venv = os.getenv('VIRTUAL_ENV') - if not venv: - return - venv = os.path.abspath(venv) - p = os.path.join( - venv, 'lib', 'python%d.%d' % sys.version_info[:2], 'site-packages') - sys_path.insert(0, p) - - p = sys.path[1:] - check_virtual_env(p) - return p - - -class CachedModule(object): - """ - The base type for all modules, which is not to be confused with - `parsing.Module`. Caching happens here. - """ - cache = {} - - def __init__(self, path=None, name=None): - self.path = path and os.path.abspath(path) - self.name = name - self._parser = None - - @property - def parser(self): - """ get the parser lazy """ - if not self._parser: - try: - timestamp, parser = self.cache[self.path or self.name] - if not self.path or os.path.getmtime(self.path) <= timestamp: - self._parser = parser - else: - # In case there is already a module cached and this module - # has to be reparsed, we also need to invalidate the import - # caches. - imports.invalidate_star_import_cache(parser.module) - raise KeyError() - except KeyError: - self._load_module() - return self._parser - - def _get_source(self): - raise NotImplementedError() - - def _load_module(self): - source = self._get_source() - self._parser = parsing.PyFuzzyParser(source, self.path or self.name) - p_time = None if not self.path else os.path.getmtime(self.path) - - if self.path or self.name: - self.cache[self.path or self.name] = p_time, self._parser - - -class Parser(CachedModule): - """ - This module is a parser for all builtin modules, which are programmed in - C/C++. It should also work on third party modules. - It can be instantiated with either a path or a name of the module. The path - is important for third party modules. - - :param name: The name of the module. - :param path: The path of the module. - :param sys_path: The sys.path, which is can be customizable. 
- """ - - map_types = { - 'floating point number': '0.0', - 'string': '""', - 'str': '""', - 'character': '"a"', - 'integer': '0', - 'int': '0', - 'dictionary': '{}', - 'list': '[]', - 'file object': 'file("")', - # TODO things like dbg: ('not working', 'tuple of integers') - } - - if is_py3k: - map_types['file object'] = 'import io; return io.TextIOWrapper()' - - module_cache = {} - - def __init__(self, path=None, name=None, sys_path=None): - if sys_path is None: - sys_path = get_sys_path() - if not name: - name = os.path.basename(path) - name = name.rpartition('.')[0] # cut file type (normally .so) - super(Parser, self).__init__(path=path, name=name) - - self.sys_path = list(sys_path) - self._module = None - - @property - def module(self): - def load_module(name, path): - if path: - self.sys_path.insert(0, path) - - temp, sys.path = sys.path, self.sys_path - content = {} - try: - exec_function('import %s as module' % name, content) - self._module = content['module'] - except AttributeError: - # use sys.modules, because you cannot access some modules - # directly. -> #59 - self._module = sys.modules[name] - sys.path = temp - - if path: - self.sys_path.pop(0) - - # module might already be defined - if not self._module: - path = self.path - name = self.name - if self.path: - - dot_path = [] - p = self.path - # search for the builtin with the correct path - while p and p not in sys.path: - p, sep, mod = p.rpartition(os.path.sep) - dot_path.append(mod.partition('.')[0]) - if p: - name = ".".join(reversed(dot_path)) - path = p - else: - path = os.path.dirname(self.path) - - load_module(name, path) - return self._module - - def _get_source(self): - """ Override this abstract method """ - return _generate_code(self.module, self._load_mixins()) - - def _load_mixins(self): - """ - Load functions that are mixed in to the standard library. - E.g. builtins are written in C (binaries), but my autocompletion only - understands Python code. By mixing in Python code, the autocompletion - should work much better for builtins. - """ - regex = r'^(def|class)\s+([\w\d]+)' - - def process_code(code, depth=0): - funcs = {} - matches = list(re.finditer(regex, code, re.MULTILINE)) - positions = [m.start() for m in matches] - for i, pos in enumerate(positions): - try: - code_block = code[pos:positions[i + 1]] - except IndexError: - code_block = code[pos:len(code)] - structure_name = matches[i].group(1) - name = matches[i].group(2) - if structure_name == 'def': - funcs[name] = code_block - elif structure_name == 'class': - if depth > 0: - raise NotImplementedError() - - # remove class line - c = re.sub(r'^[^\n]+', '', code_block) - # remove whitespace - c = re.compile(r'^[ ]{4}', re.MULTILINE).sub('', c) - - funcs[name] = process_code(c) - else: - raise NotImplementedError() - return funcs - - try: - name = self.name - if name == '__builtin__' and not is_py3k: - name = 'builtins' - path = os.path.dirname(os.path.abspath(__file__)) - with open(os.path.sep.join([path, 'mixin', name]) + '.pym') as f: - s = f.read() - except IOError: - return {} - else: - mixin_dct = process_code(s) - if is_py3k and self.name == Builtin.name: - # in the case of Py3k xrange is now range - mixin_dct['range'] = mixin_dct['xrange'] - return mixin_dct - - -def _generate_code(scope, mixin_funcs={}, depth=0): - """ - Generate a string, which uses python syntax as an input to the - PyFuzzyParser. 
- """ - def get_doc(obj, indent=False): - doc = inspect.getdoc(obj) - if doc: - doc = ('r"""\n%s\n"""\n' % doc) - if indent: - doc = parsing.indent_block(doc) - return doc - return '' - - def is_in_base_classes(cls, name, comparison): - """ Base classes may contain the exact same object """ - if name in mixin_funcs: - return False - try: - mro = cls.mro() - except TypeError: - # this happens, if cls == type - return False - for base in mro[1:]: - try: - attr = getattr(base, name) - except AttributeError: - continue - if attr == comparison: - return True - return False - - def get_scope_objects(names): - """ - Looks for the names defined with dir() in an objects and divides - them into different object types. - """ - classes = {} - funcs = {} - stmts = {} - members = {} - for n in names: - try: - # this has a builtin_function_or_method - exe = getattr(scope, n) - except AttributeError: - # happens e.g. in properties of - # PyQt4.QtGui.QStyleOptionComboBox.currentText - # -> just set it to None - members[n] = None - else: - if inspect.isclass(scope): - if is_in_base_classes(scope, n, exe): - continue - if inspect.isbuiltin(exe) or inspect.ismethod(exe) \ - or inspect.ismethoddescriptor(exe): - funcs[n] = exe - elif inspect.isclass(exe): - classes[n] = exe - elif inspect.ismemberdescriptor(exe): - members[n] = exe - else: - stmts[n] = exe - return classes, funcs, stmts, members - - code = '' - if inspect.ismodule(scope): # generate comment where the code's from. - try: - path = scope.__file__ - except AttributeError: - path = '?' - code += '# Generated module %s from %s\n' % (scope.__name__, path) - - code += get_doc(scope) - - names = set(dir(scope)) - set(['__file__', '__name__', '__doc__', - '__path__', '__package__']) \ - | set(['mro']) - - classes, funcs, stmts, members = get_scope_objects(names) - - # classes - for name, cl in classes.items(): - bases = (c.__name__ for c in cl.__bases__) - code += 'class %s(%s):\n' % (name, ','.join(bases)) - if depth == 0: - try: - mixin = mixin_funcs[name] - except KeyError: - mixin = {} - cl_code = _generate_code(cl, mixin, depth + 1) - code += parsing.indent_block(cl_code) - code += '\n' - - # functions - for name, func in funcs.items(): - params, ret = parse_function_doc(func) - if depth > 0: - params = 'self, ' + params - doc_str = get_doc(func, indent=True) - try: - mixin = mixin_funcs[name] - except KeyError: - # normal code generation - code += 'def %s(%s):\n' % (name, params) - code += doc_str - code += parsing.indent_block('%s\n\n' % ret) - else: - # generation of code with mixins - # the parser only supports basic functions with a newline after - # the double dots - # find doc_str place - pos = re.search(r'\):\s*\n', mixin).end() - if pos is None: - raise Exception("Builtin function not parsed correctly") - code += mixin[:pos] + doc_str + mixin[pos:] - - # class members (functions) properties? - for name, func in members.items(): - # recursion problem in properties TODO remove - if name in ['fget', 'fset', 'fdel']: - continue - ret = 'pass' - code += '@property\ndef %s(self):\n' % (name) - code += parsing.indent_block(get_doc(func) + '%s\n\n' % ret) - - # variables - for name, value in stmts.items(): - if is_py3k: - file_type = io.TextIOWrapper - else: - file_type = types.FileType - if type(value) == file_type: - value = 'open()' - elif name == 'None': - value = '' - elif type(value).__name__ in ['int', 'bool', 'float', - 'dict', 'list', 'tuple']: - value = repr(value) - else: - # get the type, if the type is not simple. 
- mod = type(value).__module__ - value = type(value).__name__ + '()' - if mod != '__builtin__': - value = '%s.%s' % (mod, value) - code += '%s = %s\n' % (name, value) - - if depth == 0: - #with open('writeout.py', 'w') as f: - # f.write(code) - #import sys - #sys.stdout.write(code) - #exit() - pass - return code - - -def parse_function_doc(func): - """ - Takes a function and returns the params and return value as a tuple. - This is nothing more than a docstring parser. - """ - # TODO: things like utime(path, (atime, mtime)) and a(b [, b]) -> None - doc = inspect.getdoc(func) - - # get full string, parse round parentheses: def func(a, (b,c)) - try: - count = 0 - debug.dbg(func, func.__name__, doc) - start = doc.index('(') - for i, s in enumerate(doc[start:]): - if s == '(': - count += 1 - elif s == ')': - count -= 1 - if count == 0: - end = start + i - break - param_str = doc[start + 1:end] - - # remove square brackets, that show an optional param ( = None) - def change_options(m): - args = m.group(1).split(',') - for i, a in enumerate(args): - if a and '=' not in a: - args[i] += '=None' - return ','.join(args) - while True: - param_str, changes = re.subn(r' ?\[([^\[\]]+)\]', - change_options, param_str) - if changes == 0: - break - except (ValueError, AttributeError): - debug.dbg('no brackets found - no param') - end = 0 - param_str = '' - - param_str = param_str.replace('-', '_') # see: isinstance.__doc__ - - if doc is not None: - r = re.search('-[>-]* ', doc[end:end + 7]) - if doc is None or r is None: - ret = 'pass' - else: - index = end + r.end() - # get result type, which can contain newlines - pattern = re.compile(r'(,\n|[^\n-])+') - ret_str = pattern.match(doc, index).group(0).strip() - # New object -> object() - ret_str = re.sub(r'[nN]ew (.*)', r'\1()', ret_str) - - ret = Parser.map_types.get(ret_str, ret_str) - if ret == ret_str and ret not in ['None', 'object', 'tuple', 'set']: - debug.dbg('not working', ret_str) - if ret != 'pass': - ret = ('return ' if 'return' not in ret else '') + ret - return param_str, ret - - -class Builtin(object): - """ The builtin scope / module """ - # Python 3 compatibility - if is_py3k: - name = 'builtins' - else: - name = '__builtin__' - - _builtin = None - - @property - def builtin(self): - if self._builtin is None: - self._builtin = Parser(name=self.name) - return self._builtin - - @property - def scope(self): - return self.builtin.parser.module - - @property - def magic_function_scope(self): - try: - return self._magic_function_scope - except AttributeError: - # depth = 1 because this is not a module - class Container(object): - FunctionType = types.FunctionType - source = _generate_code(Container, depth=0) - parser = parsing.PyFuzzyParser(source, None) - module = parser.module - module.parent = self.scope - typ = evaluate.follow_path(iter(['FunctionType']), module, module) - - s = self._magic_function_scope = typ.pop() - return s - - -Builtin = Builtin() diff --git a/common/.local/lib/python2.7/site-packages/jedi/debug.py b/common/.local/lib/python2.7/site-packages/jedi/debug.py deleted file mode 100644 index 321ffa8..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi/debug.py +++ /dev/null @@ -1,64 +0,0 @@ -import inspect -import time - -try: - # Use colorama for nicer console output. 
- from colorama import Fore, init - init() -except ImportError: - class Fore(object): - RED = '' - GREEN = '' - YELLOW = '' - RESET = '' - -NOTICE = object() -WARNING = object() -SPEED = object() - -enable_speed = False -enable_warning = False -enable_notice = False - -# callback, interface: level, str -debug_function = None -ignored_modules = ['parsing', 'builtin', 'jedi.builtin', 'jedi.parsing'] - - -def reset_time(): - global start_time - start_time = time.time() - - -def dbg(*args): - """ Looks at the stack, to see if a debug message should be printed. """ - if debug_function and enable_notice: - frm = inspect.stack()[1] - mod = inspect.getmodule(frm[0]) - if not (mod.__name__ in ignored_modules): - debug_function(NOTICE, 'dbg: ' + ', '.join(str(a) for a in args)) - - -def warning(*args): - if debug_function and enable_warning: - debug_function(WARNING, 'warning: ' + ', '.join(str(a) for a in args)) - - -def speed(name): - if debug_function and enable_speed: - now = time.time() - debug_function(SPEED, 'speed: ' + '%s %s' % (name, now - start_time)) - - -def print_to_stdout(level, str_out): - """ The default debug function """ - if level == NOTICE: - col = Fore.GREEN - elif level == WARNING: - col = Fore.RED - else: - col = Fore.YELLOW - print(col + str_out + Fore.RESET) - - -#debug_function = print_to_stdout diff --git a/common/.local/lib/python2.7/site-packages/jedi/docstrings.py b/common/.local/lib/python2.7/site-packages/jedi/docstrings.py deleted file mode 100644 index 29bf0d2..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi/docstrings.py +++ /dev/null @@ -1,34 +0,0 @@ -""" Processing of docstrings, which means parsing for types. """ - -import re - -import evaluate - - -#@evaluate.memoize_default() # TODO add -def follow_param(param): - func = param.parent_function - #print func, param, param.parent_function - param_str = search_param_in_docstr(func.docstr, str(param.get_name())) - - if param_str is not None: - scope = func.get_parent_until() - return evaluate.get_scopes_for_name(scope, param_str, - search_global=True) - return [] - - -def search_param_in_docstr(docstr, param_str): - lines = docstr.split('\n') - - # look at #40 to see definitions of those params - sphinx_comp = ':type %s:' % param_str - googley_comp = re.compile('\s*%s\s+\(([^()]+)\)' % re.escape(param_str)) - for l in lines: - if l.startswith(sphinx_comp): - return l.replace(sphinx_comp, '', 1).strip() - - r = re.match(googley_comp, l) - if r is not None: - return r.group(1) - return None diff --git a/common/.local/lib/python2.7/site-packages/jedi/dynamic.py b/common/.local/lib/python2.7/site-packages/jedi/dynamic.py deleted file mode 100644 index 2e77546..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi/dynamic.py +++ /dev/null @@ -1,507 +0,0 @@ -""" -For dynamic completion. - -Sorry to everyone who is reading this code. Especially the array parts are -really cryptic and not understandable. It's just a hack, that turned out to be -working quite good. -""" -from __future__ import with_statement - -import os - -import parsing -import modules -import evaluate -import helpers -import settings -import debug -import builtin -import imports -import api_classes - -# This is something like the sys.path, but only for searching params. It means -# that this is the order in which Jedi searches params. -search_param_modules = ['.'] -search_param_cache = {} - - -def get_directory_modules_for_name(mods, name): - """ - Search a name in the directories of modules. 
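# Illustration of the two docstring conventions that follow_param /
# search_param_in_docstr above recognise: the Sphinx ":type name:" field and
# the Google-style "name (type):" form. The regexes mirror the deleted
# docstrings.py rather than importing it.
import re

def param_type_from_docstr(docstr, param):
    sphinx = ':type %s:' % param
    google = re.compile(r'\s*%s\s+\(([^()]+)\)' % re.escape(param))
    for line in docstr.split('\n'):
        if line.startswith(sphinx):
            return line.replace(sphinx, '', 1).strip()
        match = google.match(line)
        if match:
            return match.group(1)
    return None

sphinx_doc = 'Read a file.\n\n:param path: file to read\n:type path: str\n'
google_doc = 'Read a file.\n\nArgs:\n    path (str): file to read\n'
print(param_type_from_docstr(sphinx_doc, 'path'))   # str
print(param_type_from_docstr(google_doc, 'path'))   # str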
- """ - def check_python_file(path): - try: - return builtin.CachedModule.cache[path][1].module - except KeyError: - try: - return check_fs(path) - except IOError: - return None - - def check_fs(path): - with open(path) as f: - source = f.read() - if name in source: - return modules.Module(path, source).parser.module - - # skip non python modules - mods = set(m for m in mods if m.path.endswith('.py')) - mod_paths = set() - for m in mods: - mod_paths.add(m.path) - yield m - - if settings.dynamic_params_for_other_modules: - paths = set(settings.additional_dynamic_modules) - for p in mod_paths: - d = os.path.dirname(p) - for entry in os.listdir(d): - if entry not in mod_paths: - if entry.endswith('.py'): - paths.add(d + os.path.sep + entry) - - for p in paths: - c = check_python_file(p) - if c is not None and c not in mods: - yield c - - -def search_param_memoize(func): - """ - Is only good for search params memoize, respectively the closure, - because it just caches the input, not the func, like normal memoize does. - """ - def wrapper(*args, **kwargs): - key = (args, frozenset(kwargs.items())) - if key in search_param_cache: - return search_param_cache[key] - else: - rv = func(*args, **kwargs) - search_param_cache[key] = rv - return rv - return wrapper - - -class ParamListener(object): - """ - This listener is used to get the params for a function. - """ - def __init__(self): - self.param_possibilities = [] - - def execute(self, params): - self.param_possibilities.append(params) - - -@evaluate.memoize_default([]) -def search_params(param): - """ - This is a dynamic search for params. If you try to complete a type: - >>> def func(foo): - >>> # here is the completion - >>> foo - >>> func(1) - >>> func("") - - It is not known what the type is, because it cannot be guessed with - recursive madness. Therefore one has to analyse the statements that are - calling the function, as well as analyzing the incoming params. - """ - if not settings.dynamic_params: - return [] - - def get_params_for_module(module): - """ - Returns the values of a param, or an empty array. - """ - @search_param_memoize - def get_posibilities(module, func_name): - try: - possible_stmts = module.used_names[func_name] - except KeyError: - return [] - - for stmt in possible_stmts: - if not isinstance(stmt, parsing.Import): - calls = _scan_array(stmt.get_assignment_calls(), func_name) - for c in calls: - # no execution means that params cannot be set - call_path = c.generate_call_path() - pos = c.start_pos - scope = stmt.parent - evaluate.follow_call_path(call_path, scope, pos) - return listener.param_possibilities - - result = [] - for params in get_posibilities(module, func_name): - for p in params: - if str(p) == param_name: - result += evaluate.follow_statement(p.parent) - return result - - func = param.get_parent_until(parsing.Function) - current_module = param.get_parent_until() - func_name = str(func.name) - if func_name == '__init__' and isinstance(func.parent, parsing.Class): - func_name = str(func.parent.name) - - # get the param name - if param.assignment_details: - arr = param.assignment_details[0][1] - else: - arr = param.get_assignment_calls() - offset = 1 if arr[0][0] in ['*', '**'] else 0 - param_name = str(arr[0][offset].name) - - # add the listener - listener = ParamListener() - func.listeners.add(listener) - - result = [] - # This is like backtracking: Get the first possible result. 
- for mod in get_directory_modules_for_name([current_module], func_name): - result = get_params_for_module(mod) - if result: - break - - # cleanup: remove the listener; important: should not stick. - func.listeners.remove(listener) - - return result - - -def check_array_additions(array): - """ Just a mapper function for the internal _check_array_additions """ - if array._array.type not in ['list', 'set']: - # TODO also check for dict updates - return [] - - is_list = array._array.type == 'list' - current_module = array._array.parent_stmt.get_parent_until() - res = _check_array_additions(array, current_module, is_list) - return res - - -def _scan_array(arr, search_name): - """ Returns the function Call that match search_name in an Array. """ - result = [] - for sub in arr: - for s in sub: - if isinstance(s, parsing.Array): - result += _scan_array(s, search_name) - elif isinstance(s, parsing.Call): - s_new = s - while s_new is not None: - n = s_new.name - if isinstance(n, parsing.Name) and search_name in n.names: - result.append(s) - - if s_new.execution is not None: - result += _scan_array(s_new.execution, search_name) - s_new = s_new.next - return result - - -counter = 0 -def dec(func): - """ TODO delete this """ - def wrapper(*args, **kwargs): - global counter - element = args[0] - if isinstance(element, evaluate.Array): - stmt = element._array.parent_stmt - else: - # must be instance - stmt = element.var_args.parent_stmt - print(' ' * counter + 'recursion,', stmt) - counter += 1 - res = func(*args, **kwargs) - counter -= 1 - #print ' '*counter + 'end,' - return res - return wrapper - - -#@dec -@evaluate.memoize_default([]) -def _check_array_additions(compare_array, module, is_list): - """ - Checks if a `parsing.Array` has "add" statements: - >>> a = [""] - >>> a.append(1) - """ - if not settings.dynamic_array_additions or module.is_builtin(): - return [] - - def check_calls(calls, add_name): - """ - Calls are processed here. The part before the call is searched and - compared with the original Array. 
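# What _check_array_additions above makes possible, sketched with the vendored
# API (positions illustrative): append() calls contribute element types to the
# list literal they act on.
import jedi

source = '\n'.join([
    'a = [""]',
    'a.append(1)',
    'a[0].',             # <- completion requested here
])
completions = jedi.Script(source, 3, 5, 'example.py').complete()
# Attributes of both str and int are expected, because 1 was appended to a.
print(sorted(set(c.word for c in completions))[:5])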
- """ - result = [] - for c in calls: - call_path = list(c.generate_call_path()) - separate_index = call_path.index(add_name) - if add_name == call_path[-1] or separate_index == 0: - # this means that there is no execution -> [].append - # or the keyword is at the start -> append() - continue - backtrack_path = iter(call_path[:separate_index]) - - position = c.start_pos - scope = c.parent_stmt.parent - - found = evaluate.follow_call_path(backtrack_path, scope, position) - if not compare_array in found: - continue - - params = call_path[separate_index + 1] - if not params.values: - continue # no params: just ignore it - if add_name in ['append', 'add']: - result += evaluate.follow_call_list(params) - elif add_name in ['insert']: - try: - second_param = params[1] - except IndexError: - continue - else: - result += evaluate.follow_call_list([second_param]) - elif add_name in ['extend', 'update']: - iterators = evaluate.follow_call_list(params) - result += evaluate.get_iterator_types(iterators) - return result - - def get_execution_parent(element, *stop_classes): - """ Used to get an Instance/Execution parent """ - if isinstance(element, evaluate.Array): - stmt = element._array.parent_stmt - else: - # must be instance - stmt = element.var_args.parent_stmt - if isinstance(stmt, evaluate.InstanceElement): - stop_classes = list(stop_classes) + [evaluate.Function] - return stmt.get_parent_until(stop_classes) - - temp_param_add = settings.dynamic_params_for_other_modules - settings.dynamic_params_for_other_modules = False - - search_names = ['append', 'extend', 'insert'] if is_list else \ - ['add', 'update'] - comp_arr_parent = get_execution_parent(compare_array, evaluate.Execution) - possible_stmts = [] - res = [] - for n in search_names: - try: - possible_stmts += module.used_names[n] - except KeyError: - continue - for stmt in possible_stmts: - # Check if the original scope is an execution. If it is, one - # can search for the same statement, that is in the module - # dict. Executions are somewhat special in jedi, since they - # literally copy the contents of a function. - if isinstance(comp_arr_parent, evaluate.Execution): - stmt = comp_arr_parent. \ - get_statement_for_position(stmt.start_pos) - if stmt is None: - continue - # InstanceElements are special, because they don't get copied, - # but have this wrapper around them. - if isinstance(comp_arr_parent, evaluate.InstanceElement): - stmt = evaluate.InstanceElement(comp_arr_parent.instance, stmt) - - if evaluate.follow_statement.push_stmt(stmt): - # check recursion - continue - res += check_calls(_scan_array(stmt.get_assignment_calls(), n), n) - evaluate.follow_statement.pop_stmt() - # reset settings - settings.dynamic_params_for_other_modules = temp_param_add - return res - - -def check_array_instances(instance): - """ Used for set() and list() instances. """ - if not settings.dynamic_arrays_instances: - return instance.var_args - ai = ArrayInstance(instance) - return helpers.generate_param_array([ai], instance.var_args.parent_stmt) - - -class ArrayInstance(parsing.Base): - """ - Used for the usage of set() and list(). - This is definitely a hack, but a good one :-) - It makes it possible to use set/list conversions. - """ - def __init__(self, instance): - self.instance = instance - self.var_args = instance.var_args - - def iter_content(self): - """ - The index is here just ignored, because of all the appends, etc. - lists/sets are too complicated too handle that. 
- """ - items = [] - for array in evaluate.follow_call_list(self.var_args): - if isinstance(array, evaluate.Instance) and len(array.var_args): - temp = array.var_args[0][0] - if isinstance(temp, ArrayInstance): - # prevent recursions - # TODO compare Modules - if self.var_args.start_pos != temp.var_args.start_pos: - items += temp.iter_content() - else: - debug.warning('ArrayInstance recursion', self.var_args) - continue - items += evaluate.get_iterator_types([array]) - - if self.var_args.parent_stmt is None: - return [] # generated var_args should not be checked for arrays - - module = self.var_args.parent_stmt.get_parent_until() - is_list = str(self.instance.name) == 'list' - items += _check_array_additions(self.instance, module, is_list) - return items - - -def related_names(definitions, search_name, mods): - def check_call(call): - result = [] - follow = [] # There might be multiple search_name's in one call_path - call_path = list(call.generate_call_path()) - for i, name in enumerate(call_path): - # name is `parsing.NamePart`. - if name == search_name: - follow.append(call_path[:i + 1]) - - for f in follow: - follow_res, search = evaluate.goto(call.parent_stmt, f) - follow_res = related_name_add_import_modules(follow_res, search) - - #print follow_res, [d.parent for d in follow_res] - # compare to see if they match - if any(r in definitions for r in follow_res): - scope = call.parent_stmt - result.append(api_classes.RelatedName(search, scope)) - - return result - - if not definitions: - return set() - - def is_definition(arr): - try: - for a in arr: - assert len(a) == 1 - a = a[0] - if a.isinstance(parsing.Array): - assert is_definition(a) - elif a.isinstance(parsing.Call): - assert a.execution is None - return True - except AssertionError: - return False - - mods |= set([d.get_parent_until() for d in definitions]) - names = [] - for m in get_directory_modules_for_name(mods, search_name): - try: - stmts = m.used_names[search_name] - except KeyError: - continue - for stmt in stmts: - if isinstance(stmt, parsing.Import): - count = 0 - imps = [] - for i in stmt.get_all_import_names(): - for name_part in i.names: - count += 1 - if name_part == search_name: - imps.append((count, name_part)) - - for used_count, name_part in imps: - i = imports.ImportPath(stmt, kill_count=count - used_count, - direct_resolve=True) - f = i.follow(is_goto=True) - if set(f) & set(definitions): - names.append(api_classes.RelatedName(name_part, stmt)) - else: - calls = _scan_array(stmt.get_assignment_calls(), search_name) - for d in stmt.assignment_details: - if not is_definition(d[1]): - calls += _scan_array(d[1], search_name) - for call in calls: - names += check_call(call) - return names - - -def related_name_add_import_modules(definitions, search_name): - """ Adds the modules of the imports """ - new = set() - for d in definitions: - if isinstance(d.parent, parsing.Import): - s = imports.ImportPath(d.parent, direct_resolve=True) - try: - new.add(s.follow(is_goto=True)[0]) - except IndexError: - pass - return set(definitions) | new - - -def check_flow_information(flow, search_name, pos): - """ Try to find out the type of a variable just with the information that - is given by the flows: e.g. It is also responsible for assert checks. - >>> if isinstance(k, str): - >>> k. # <- completion here - - ensures that `k` is a string. 
- """ - result = [] - if isinstance(flow, parsing.Scope) and not result: - for ass in reversed(flow.asserts): - if pos is None or ass.start_pos > pos: - continue - result = check_statement_information(ass, search_name) - if result: - break - - if isinstance(flow, parsing.Flow) and not result: - if flow.command in ['if', 'while'] and len(flow.inits) == 1: - result = check_statement_information(flow.inits[0], search_name) - return result - - -def check_statement_information(stmt, search_name): - try: - ass = stmt.get_assignment_calls() - try: - call = ass.get_only_subelement() - except AttributeError: - assert False - assert type(call) == parsing.Call and str(call.name) == 'isinstance' - assert bool(call.execution) - - # isinstance check - isinst = call.execution.values - assert len(isinst) == 2 # has two params - assert len(isinst[0]) == 1 - assert len(isinst[1]) == 1 - assert isinstance(isinst[0][0], parsing.Call) - # names fit? - assert str(isinst[0][0].name) == search_name - classes_call = isinst[1][0] # class_or_type_or_tuple - assert isinstance(classes_call, parsing.Call) - result = [] - for c in evaluate.follow_call(classes_call): - if isinstance(c, evaluate.Array): - result += c.get_index_types() - else: - result.append(c) - for i, c in enumerate(result): - result[i] = evaluate.Instance(c) - return result - except AssertionError: - return [] diff --git a/common/.local/lib/python2.7/site-packages/jedi/evaluate.py b/common/.local/lib/python2.7/site-packages/jedi/evaluate.py deleted file mode 100644 index 5b3d212..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi/evaluate.py +++ /dev/null @@ -1,1614 +0,0 @@ -""" -This is the core part of jedi. Most of the logic, how to evaluate certain -objects (imports, etc.) is here. - -The functions should be described in their docstrings. However, there are some -classes, which are used to store the values. After those classes, there are the -search functions `get_names_for_scope` and `get_scopes_for_name`. At the end -there are the `follow_` functions, which evaluate a statement, or parts of a -statement. - -TODO super() -TODO nonlocal statement, needed or can be ignored? (py3k) -""" -from _compatibility import next, property, hasattr, is_py3k, use_metaclass, \ - unicode - -import sys -import itertools -import copy - -import parsing -import debug -import builtin -import imports -import helpers -import dynamic -import docstrings - -memoize_caches = [] -faked_scopes = [] - - -class DecoratorNotFound(LookupError): - """ - Decorators are sometimes not found, if that happens, that error is raised. - """ - pass - - -class MultiLevelStopIteration(Exception): - """ - StopIteration's get catched pretty easy by for loops, let errors propagate. - """ - pass - - -class MultiLevelAttributeError(Exception): - """ - Important, because `__getattr__` and `hasattr` catch AttributeErrors - implicitly. This is really evil (mainly because of `__getattr__`). - `hasattr` in Python 2 is even more evil, because it catches ALL exceptions. - Therefore this class has to be a `BaseException` and not an `Exception`. - But because I rewrote hasattr, we can now switch back to `Exception`. - - :param base: return values of sys.exc_info(). - """ - def __init__(self, base): - self.base = base - - def __str__(self): - import traceback - tb = traceback.format_exception(*self.base) - return 'Original:\n\n' + ''.join(tb) - - -def clear_caches(): - """ - Clears all caches of this and related modules. 
Jedi caches many things, - that should be completed after each completion finishes. The only things - that stays is the module cache (which is not deleted here). - """ - global memoize_caches, faked_scopes - - for m in memoize_caches: - m.clear() - - dynamic.search_param_cache.clear() - helpers.ExecutionRecursionDecorator.reset() - - # memorize_caches must never be deleted, because the dicts will get lost in - # the wrappers. - faked_scopes = [] - - follow_statement.reset() - - imports.imports_processed = 0 - - -def memoize_default(default=None): - """ - This is a typical memoization decorator, BUT there is one difference: - To prevent recursion it sets defaults. - - Preventing recursion is in this case the much bigger use than speed. I - don't think, that there is a big speed difference, but there are many cases - where recursion could happen (think about a = b; b = a). - """ - def func(function): - memo = {} - memoize_caches.append(memo) - - def wrapper(*args, **kwargs): - key = (args, frozenset(kwargs.items())) - if key in memo: - return memo[key] - else: - memo[key] = default - rv = function(*args, **kwargs) - memo[key] = rv - return rv - return wrapper - return func - - -class CachedMetaClass(type): - """ - This is basically almost the same than the decorator above, it just caches - class initializations. I haven't found any other way, so I do it with meta - classes. - """ - @memoize_default() - def __call__(self, *args, **kwargs): - return super(CachedMetaClass, self).__call__(*args, **kwargs) - - -class Executable(parsing.Base): - """ An instance is also an executable - because __init__ is called """ - def __init__(self, base, var_args=None): - self.base = base - # The param input array. - if var_args is None: - var_args = parsing.Array(None, None) - self.var_args = var_args - - def get_parent_until(self, *args, **kwargs): - return self.base.get_parent_until(*args, **kwargs) - - @property - def parent(self): - return self.base.parent - - -class Instance(use_metaclass(CachedMetaClass, Executable)): - """ This class is used to evaluate instances. """ - def __init__(self, base, var_args=None): - super(Instance, self).__init__(base, var_args) - if str(base.name) in ['list', 'set'] \ - and builtin.Builtin.scope == base.get_parent_until(): - # compare the module path with the builtin name. - self.var_args = dynamic.check_array_instances(self) - else: - # need to execute the __init__ function, because the dynamic param - # searching needs it. - try: - self.execute_subscope_by_name('__init__', self.var_args) - except KeyError: - pass - # Generated instances are classes that are just generated by self - # (No var_args) used. - self.is_generated = False - - @memoize_default() - def get_init_execution(self, func): - func = InstanceElement(self, func, True) - return Execution(func, self.var_args) - - def get_func_self_name(self, func): - """ - Returns the name of the first param in a class method (which is - normally self - """ - try: - return func.params[0].used_vars[0].names[0] - except IndexError: - return None - - def get_self_properties(self): - def add_self_dot_name(name): - n = copy.copy(name) - n.names = n.names[1:] - names.append(InstanceElement(self, n)) - - names = [] - # This loop adds the names of the self object, copies them and removes - # the self. - for sub in self.base.subscopes: - if isinstance(sub, parsing.Class): - continue - # Get the self name, if there's one. - self_name = self.get_func_self_name(sub) - if self_name: - # Check the __init__ function. 
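# A standalone sketch of the recursion guard that memoize_default above
# provides: the cache is seeded with the default before the wrapped call runs,
# so a cyclic lookup (a = b; b = a) terminates instead of recursing forever.
# The resolver below is invented for the illustration.
def memoize_default(default=None):
    def decorator(function):
        memo = {}
        def wrapper(*args):
            if args in memo:
                return memo[args]
            memo[args] = default        # seed first: this is what breaks cycles
            result = function(*args)
            memo[args] = result
            return result
        return wrapper
    return decorator

assignments = {'a': 'b', 'b': 'a'}      # a cyclic pair of assignments

@memoize_default(default=set())
def resolve(name):
    target = assignments.get(name)
    return resolve(target) if target in assignments else {target}

print(resolve('a'))                     # the seeded default ends the cycle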
- if sub.name.get_code() == '__init__': - sub = self.get_init_execution(sub) - for n in sub.get_set_vars(): - # Only names with the selfname are being added. - # It is also important, that they have a len() of 2, - # because otherwise, they are just something else - if n.names[0] == self_name and len(n.names) == 2: - add_self_dot_name(n) - - for s in self.base.get_super_classes(): - if s == self.base: - # I don't know how this could happen... But saw it once. - continue - names += Instance(s).get_self_properties() - - return names - - def get_subscope_by_name(self, name): - sub = self.base.get_subscope_by_name(name) - return InstanceElement(self, sub, True) - - def execute_subscope_by_name(self, name, args=None): - if args is None: - args = helpers.generate_param_array([]) - method = self.get_subscope_by_name(name) - if args.parent_stmt is None: - args.parent_stmt = method - return Execution(method, args).get_return_types() - - def get_descriptor_return(self, obj): - """ Throws a KeyError if there's no method. """ - # Arguments in __get__ descriptors are obj, class. - # `method` is the new parent of the array, don't know if that's good. - v = [obj, obj.base] if isinstance(obj, Instance) else [None, obj] - args = helpers.generate_param_array(v) - return self.execute_subscope_by_name('__get__', args) - - @memoize_default([]) - def get_defined_names(self): - """ - Get the instance vars of a class. This includes the vars of all - classes - """ - names = self.get_self_properties() - - class_names = self.base.get_defined_names() - for var in class_names: - names.append(InstanceElement(self, var, True)) - return names - - def scope_generator(self): - """ - An Instance has two scopes: The scope with self names and the class - scope. Instance variables have priority over the class scope. - """ - yield self, self.get_self_properties() - - names = [] - class_names = self.base.get_defined_names() - for var in class_names: - names.append(InstanceElement(self, var, True)) - yield self, names - - def get_index_types(self, index=None): - args = helpers.generate_param_array([] if index is None else [index]) - try: - return self.execute_subscope_by_name('__getitem__', args) - except KeyError: - debug.warning('No __getitem__, cannot access the array.') - return [] - - def __getattr__(self, name): - if name not in ['start_pos', 'end_pos', 'name', 'get_imports', - 'docstr', 'asserts']: - raise AttributeError("Instance %s: Don't touch this (%s)!" - % (self, name)) - return getattr(self.base, name) - - def __repr__(self): - return "" % \ - (type(self).__name__, self.base, len(self.var_args or [])) - - -class InstanceElement(use_metaclass(CachedMetaClass)): - """ - InstanceElement is a wrapper for any object, that is used as an instance - variable (e.g. self.variable or class methods). 
- """ - def __init__(self, instance, var, is_class_var=False): - if isinstance(var, parsing.Function): - var = Function(var) - elif isinstance(var, parsing.Class): - var = Class(var) - self.instance = instance - self.var = var - self.is_class_var = is_class_var - - @property - @memoize_default() - def parent(self): - par = self.var.parent - if isinstance(par, Class) and par == self.instance.base \ - or isinstance(par, parsing.Class) \ - and par == self.instance.base.base: - par = self.instance - elif not isinstance(par, parsing.Module): - par = InstanceElement(self.instance, par, self.is_class_var) - return par - - def get_parent_until(self, *args, **kwargs): - return parsing.Simple.get_parent_until(self, *args, **kwargs) - - def get_decorated_func(self): - """ Needed because the InstanceElement should not be stripped """ - func = self.var.get_decorated_func() - if func == self.var: - return self - return func - - def get_assignment_calls(self): - # Copy and modify the array. - origin = self.var.get_assignment_calls() - # Delete parent, because it isn't used anymore. - new = helpers.fast_parent_copy(origin) - par = InstanceElement(self.instance, origin.parent_stmt, - self.is_class_var) - new.parent_stmt = par - faked_scopes.append(par) - faked_scopes.append(new) - return new - - def __getattr__(self, name): - return getattr(self.var, name) - - def isinstance(self, *cls): - return isinstance(self.var, cls) - - def __repr__(self): - return "<%s of %s>" % (type(self).__name__, self.var) - - -class Class(use_metaclass(CachedMetaClass, parsing.Base)): - """ - This class is not only important to extend `parsing.Class`, it is also a - important for descriptors (if the descriptor methods are evaluated or not). - """ - def __init__(self, base): - self.base = base - - @memoize_default(default=[]) - def get_super_classes(self): - supers = [] - # TODO care for mro stuff (multiple super classes). - for s in self.base.supers: - # Super classes are statements. - for cls in follow_statement(s): - if not isinstance(cls, Class): - debug.warning('Received non class, as a super class') - continue # Just ignore other stuff (user input error). - supers.append(cls) - if not supers and self.base.parent != builtin.Builtin.scope: - # add `object` to classes - supers += get_scopes_for_name(builtin.Builtin.scope, 'object') - return supers - - @memoize_default(default=[]) - def get_defined_names(self): - def in_iterable(name, iterable): - """ checks if the name is in the variable 'iterable'. """ - for i in iterable: - # Only the last name is important, because these names have a - # maximal length of 2, with the first one being `self`. - if i.names[-1] == name.names[-1]: - return True - return False - - result = self.base.get_defined_names() - super_result = [] - # TODO mro! - for cls in self.get_super_classes(): - # Get the inherited names. - for i in cls.get_defined_names(): - if not in_iterable(i, result): - super_result.append(i) - result += super_result - return result - - def get_subscope_by_name(self, name): - for sub in reversed(self.subscopes): - if sub.name.get_code() == name: - return sub - raise KeyError("Couldn't find subscope.") - - @property - def name(self): - return self.base.name - - def __getattr__(self, name): - if name not in ['start_pos', 'end_pos', 'parent', 'subscopes', - 'get_imports', 'get_parent_until', 'docstr', 'asserts']: - raise AttributeError("Don't touch this (%s)!" 
% name) - return getattr(self.base, name) - - def __repr__(self): - return "" % (type(self).__name__, self.base) - - -class Function(use_metaclass(CachedMetaClass, parsing.Base)): - """ - Needed because of decorators. Decorators are evaluated here. - """ - - def __init__(self, func, is_decorated=False): - """ This should not be called directly """ - self.base_func = func - self.is_decorated = is_decorated - - @property - @memoize_default() - def _decorated_func(self): - """ - Returns the function, that is to be executed in the end. - This is also the places where the decorators are processed. - """ - f = self.base_func - - # Only enter it, if has not already been processed. - if not self.is_decorated: - for dec in reversed(self.base_func.decorators): - debug.dbg('decorator:', dec, f) - dec_results = follow_statement(dec) - if not len(dec_results): - debug.warning('decorator func not found: %s in stmt %s' % - (self.base_func, dec)) - return None - if len(dec_results) > 1: - debug.warning('multiple decorators found', self.base_func, - dec_results) - decorator = dec_results.pop() - # Create param array. - old_func = Function(f, is_decorated=True) - params = helpers.generate_param_array([old_func], old_func) - faked_scopes.append(old_func) - - wrappers = Execution(decorator, params).get_return_types() - if not len(wrappers): - debug.warning('no wrappers found', self.base_func) - return None - if len(wrappers) > 1: - debug.warning('multiple wrappers found', self.base_func, - wrappers) - # This is here, that the wrapper gets executed. - f = wrappers[0] - - debug.dbg('decorator end', f) - if f != self.base_func and isinstance(f, parsing.Function): - f = Function(f) - return f - - def get_decorated_func(self): - if self._decorated_func is None: - raise DecoratorNotFound() - if self._decorated_func == self.base_func: - return self - return self._decorated_func - - def get_magic_method_names(self): - return builtin.Builtin.magic_function_scope.get_defined_names() - - def get_magic_method_scope(self): - return builtin.Builtin.magic_function_scope - - def __getattr__(self, name): - return getattr(self.base_func, name) - - def __repr__(self): - dec = '' - if self._decorated_func != self.base_func: - dec = " is " + repr(self._decorated_func) - return "" % (type(self).__name__, self.base_func, dec) - - -class Execution(Executable): - """ - This class is used to evaluate functions and their returns. - - This is the most complicated class, because it contains the logic to - transfer parameters. It is even more complicated, because there may be - multiple calls to functions and recursion has to be avoided. But this is - responsibility of the decorators. - """ - @memoize_default(default=[]) - @helpers.ExecutionRecursionDecorator - def get_return_types(self, evaluate_generator=False): - """ Get the return types of a function. 
""" - stmts = [] - if self.base.parent == builtin.Builtin.scope \ - and not isinstance(self.base, (Generator, Array)): - func_name = str(self.base.name) - - # some implementations of builtins: - if func_name == 'getattr': - # follow the first param - try: - objects = follow_call_list([self.var_args[0]]) - names = follow_call_list([self.var_args[1]]) - except IndexError: - debug.warning('getattr() called with to few args.') - return [] - - for obj in objects: - if not isinstance(obj, (Instance, Class)): - debug.warning('getattr called without instance') - continue - - for name in names: - key = name.var_args.get_only_subelement() - stmts += follow_path(iter([key]), obj, self.base) - return stmts - elif func_name == 'type': - # otherwise it would be a metaclass - if len(self.var_args) == 1: - objects = follow_call_list([self.var_args[0]]) - return [o.base for o in objects if isinstance(o, Instance)] - - if self.base.isinstance(Class): - # There maybe executions of executions. - stmts = [Instance(self.base, self.var_args)] - elif isinstance(self.base, Generator): - return self.base.iter_content() - else: - # Don't do this with exceptions, as usual, because some deeper - # exceptions could be catched - and I wouldn't know what happened. - try: - self.base.returns - except (AttributeError, DecoratorNotFound): - if hasattr(self.base, 'execute_subscope_by_name'): - try: - stmts = self.base.execute_subscope_by_name('__call__', - self.var_args) - except KeyError: - debug.warning("no __call__ func available", self.base) - else: - debug.warning("no execution possible", self.base) - else: - stmts = self._get_function_returns(evaluate_generator) - - debug.dbg('exec result: %s in %s' % (stmts, self)) - - return imports.strip_imports(stmts) - - def _get_function_returns(self, evaluate_generator): - """ A normal Function execution """ - # Feed the listeners, with the params. - for listener in self.base.listeners: - listener.execute(self.get_params()) - func = self.base.get_decorated_func() - if func.is_generator and not evaluate_generator: - return [Generator(func, self.var_args)] - else: - stmts = [] - for r in self.returns: - stmts += follow_statement(r) - return stmts - - @memoize_default(default=[]) - def get_params(self): - """ - This returns the params for an Execution/Instance and is injected as a - 'hack' into the parsing.Function class. - This needs to be here, because Instance can have __init__ functions, - which act the same way as normal functions. - """ - def gen_param_name_copy(param, keys=[], values=[], array_type=None): - """ - Create a param with the original scope (of varargs) as parent. 
- """ - parent_stmt = self.var_args.parent_stmt - pos = parent_stmt.start_pos if parent_stmt else None - calls = parsing.Array(pos, parsing.Array.NOARRAY, parent_stmt) - calls.values = values - calls.keys = keys - calls.type = array_type - new_param = copy.copy(param) - if parent_stmt is not None: - new_param.parent = parent_stmt - new_param._assignment_calls_calculated = True - new_param._assignment_calls = calls - new_param.is_generated = True - name = copy.copy(param.get_name()) - name.parent = new_param - faked_scopes.append(new_param) - return name - - result = [] - start_offset = 0 - if isinstance(self.base, InstanceElement): - # Care for self -> just exclude it and add the instance - start_offset = 1 - self_name = copy.copy(self.base.params[0].get_name()) - self_name.parent = self.base.instance - result.append(self_name) - - param_dict = {} - for param in self.base.params: - param_dict[str(param.get_name())] = param - # There may be calls, which don't fit all the params, this just ignores - # it. - var_arg_iterator = self.get_var_args_iterator() - - non_matching_keys = [] - keys_used = set() - keys_only = False - for param in self.base.params[start_offset:]: - # The value and key can both be null. There, the defaults apply. - # args / kwargs will just be empty arrays / dicts, respectively. - # Wrong value count is just ignored. If you try to test cases that - # are not allowed in Python, Jedi will maybe not show any - # completions. - key, value = next(var_arg_iterator, (None, None)) - while key: - keys_only = True - try: - key_param = param_dict[str(key)] - except KeyError: - non_matching_keys.append((key, value)) - else: - keys_used.add(str(key)) - result.append(gen_param_name_copy(key_param, - values=[value])) - key, value = next(var_arg_iterator, (None, None)) - - assignments = param.get_assignment_calls().values - assignment = assignments[0] - keys = [] - values = [] - array_type = None - if assignment[0] == '*': - # *args param - array_type = parsing.Array.TUPLE - if value: - values.append(value) - for key, value in var_arg_iterator: - # Iterate until a key argument is found. - if key: - var_arg_iterator.push_back((key, value)) - break - values.append(value) - elif assignment[0] == '**': - # **kwargs param - array_type = parsing.Array.DICT - if non_matching_keys: - keys, values = zip(*non_matching_keys) - else: - # normal param - if value: - values = [value] - else: - if param.assignment_details: - # No value: return the default values. - values = assignments - else: - # If there is no assignment detail, that means there is - # no assignment, just the result. Therefore nothing has - # to be returned. - values = [] - - # Just ignore all the params that are without a key, after one - # keyword argument was set. - if not keys_only or assignment[0] == '**': - keys_used.add(str(key)) - result.append(gen_param_name_copy(param, keys=keys, - values=values, array_type=array_type)) - - if keys_only: - # sometimes param arguments are not completely written (which would - # create an Exception, but we have to handle that). - for k in set(param_dict) - keys_used: - result.append(gen_param_name_copy(param_dict[k])) - return result - - def get_var_args_iterator(self): - """ - Yields a key/value pair, the key is None, if its not a named arg. - """ - def iterate(): - # `var_args` is typically an Array, and not a list. 
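# The mapping that get_params above builds by hand (positional values, *args,
# **kwargs and keyword arguments matched against the parameter list) is, for
# comparison only, what inspect.signature().bind() does in Python 3; the
# deleted code predates that API and does not use it.
import inspect

def func(a, b=2, *args, **kwargs):
    pass

bound = inspect.signature(func).bind(1, 2, 3, x=4)
print(dict(bound.arguments))   # {'a': 1, 'b': 2, 'args': (3,), 'kwargs': {'x': 4}}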
- for var_arg in self.var_args: - # empty var_arg - if len(var_arg) == 0: - yield None, None - # *args - elif var_arg[0] == '*': - arrays = follow_call_list([var_arg[1:]]) - for array in arrays: - if hasattr(array, 'get_contents'): - for field in array.get_contents(): - yield None, field - # **kwargs - elif var_arg[0] == '**': - arrays = follow_call_list([var_arg[1:]]) - for array in arrays: - if hasattr(array, 'get_contents'): - for key, field in array.get_contents(): - # Take the first index. - if isinstance(key, parsing.Name): - name = key - else: - # `parsing`.[Call|Function|Class] lookup. - name = key[0].name - yield name, field - # Normal arguments (including key arguments). - else: - if len(var_arg) > 1 and var_arg[1] == '=': - # This is a named parameter (var_arg[0] is a Call). - yield var_arg[0].name, var_arg[2:] - else: - yield None, var_arg - - return iter(parsing.PushBackIterator(iterate())) - - def get_set_vars(self): - return self.get_defined_names() - - def get_defined_names(self): - """ - Call the default method with the own instance (self implements all - the necessary functions). Add also the params. - """ - return self.get_params() + parsing.Scope.get_set_vars(self) - - def copy_properties(self, prop): - """ - Literally copies a property of a Function. Copying is very expensive, - because it is something like `copy.deepcopy`. However, these copied - objects can be used for the executions, as if they were in the - execution. - """ - # Copy all these lists into this local function. - attr = getattr(self.base, prop) - objects = [] - for element in attr: - copied = helpers.fast_parent_copy(element) - copied.parent = self._scope_copy(copied.parent) - if isinstance(copied, parsing.Function): - copied = Function(copied) - objects.append(copied) - faked_scopes.append(copied) - return objects - - def __getattr__(self, name): - if name not in ['start_pos', 'end_pos', 'imports']: - raise AttributeError('Tried to access %s: %s. Why?' % (name, self)) - return getattr(self.base, name) - - @memoize_default() - def _scope_copy(self, scope): - try: - """ Copies a scope (e.g. if) in an execution """ - # TODO method uses different scopes than the subscopes property. - - # just check the start_pos, sometimes it's difficult with closures - # to compare the scopes directly. - if scope.start_pos == self.start_pos: - return self - else: - copied = helpers.fast_parent_copy(scope) - copied.parent = self._scope_copy(copied.parent) - faked_scopes.append(copied) - return copied - except AttributeError: - raise MultiLevelAttributeError(sys.exc_info()) - - @property - @memoize_default() - def returns(self): - return self.copy_properties('returns') - - @property - @memoize_default() - def asserts(self): - return self.copy_properties('asserts') - - @property - @memoize_default() - def statements(self): - return self.copy_properties('statements') - - @property - @memoize_default() - def subscopes(self): - return self.copy_properties('subscopes') - - def get_statement_for_position(self, pos): - return parsing.Scope.get_statement_for_position(self, pos) - - def __repr__(self): - return "<%s of %s>" % \ - (type(self).__name__, self.base) - - -class Generator(use_metaclass(CachedMetaClass, parsing.Base)): - """ Cares for `yield` statements. """ - def __init__(self, func, var_args): - super(Generator, self).__init__() - self.func = func - self.var_args = var_args - - def get_defined_names(self): - """ - Returns a list of names that define a generator, which can return the - content of a generator. 
- """ - names = [] - none_pos = (0, 0) - executes_generator = ('__next__', 'send') - for n in ('close', 'throw') + executes_generator: - name = parsing.Name([(n, none_pos)], none_pos, none_pos) - if n in executes_generator: - name.parent = self - names.append(name) - debug.dbg('generator names', names) - return names - - def iter_content(self): - """ returns the content of __iter__ """ - return Execution(self.func, self.var_args).get_return_types(True) - - def get_index_types(self, index=None): - debug.warning('Tried to get array access on a generator', self) - return [] - - @property - def parent(self): - return self.func.parent - - def __repr__(self): - return "<%s of %s>" % (type(self).__name__, self.func) - - -class Array(use_metaclass(CachedMetaClass, parsing.Base)): - """ - Used as a mirror to parsing.Array, if needed. It defines some getter - methods which are important in this module. - """ - def __init__(self, array): - self._array = array - - def get_index_types(self, index_call_list=None): - """ Get the types of a specific index or all, if not given """ - # array slicing - if index_call_list is not None: - if index_call_list and [x for x in index_call_list if ':' in x]: - return [self] - - index_possibilities = list(follow_call_list(index_call_list)) - if len(index_possibilities) == 1: - # This is indexing only one element, with a fixed index number, - # otherwise it just ignores the index (e.g. [1+1]). - try: - # Multiple elements in the array are not wanted. var_args - # and get_only_subelement can raise AttributeErrors. - i = index_possibilities[0].var_args.get_only_subelement() - except AttributeError: - pass - else: - try: - return self.get_exact_index_types(i) - except (IndexError, KeyError): - pass - - result = list(self.follow_values(self._array.values)) - result += dynamic.check_array_additions(self) - return set(result) - - def get_exact_index_types(self, index): - """ Here the index is an int. Raises IndexError/KeyError """ - if self._array.type == parsing.Array.DICT: - old_index = index - index = None - for i, key_elements in enumerate(self._array.keys): - # Because we only want the key to be a string. - if len(key_elements) == 1: - try: - str_key = key_elements.get_code() - except AttributeError: - try: - str_key = key_elements[0].name - except AttributeError: - str_key = None - if old_index == str_key: - index = i - break - if index is None: - raise KeyError('No key found in dictionary') - values = [self._array[index]] - return self.follow_values(values) - - def follow_values(self, values): - """ helper function for the index getters """ - return follow_call_list(values) - - def get_defined_names(self): - """ - This method generates all ArrayElements for one parsing.Array. - It returns e.g. for a list: append, pop, ... - """ - # `array.type` is a string with the type, e.g. 'list'. - scope = get_scopes_for_name(builtin.Builtin.scope, self._array.type)[0] - scope = Instance(scope) - names = scope.get_defined_names() - return [ArrayElement(n) for n in names] - - def get_contents(self): - return self._array - - @property - def parent(self): - """ - Return the builtin scope as parent, because the arrays are builtins - """ - return builtin.Builtin.scope - - def get_parent_until(self, *args, **kwargs): - return builtin.Builtin.scope - - def __getattr__(self, name): - if name not in ['type', 'start_pos']: - raise AttributeError('Strange access: %s.' 
% name) - return getattr(self._array, name) - - def __repr__(self): - return "" % (type(self).__name__, self._array) - - -class ArrayElement(object): - """ - A name, e.g. `list.append`, it is used to access to original array methods. - """ - def __init__(self, name): - super(ArrayElement, self).__init__() - self.name = name - - def __getattr__(self, name): - # Set access privileges: - if name not in ['parent', 'names', 'start_pos', 'end_pos', 'get_code']: - raise AttributeError('Strange access: %s.' % name) - return getattr(self.name, name) - - def get_parent_until(self): - return builtin.Builtin.scope - - def __repr__(self): - return "<%s of %s>" % (type(self).__name__, self.name) - - -def get_defined_names_for_position(scope, position=None, start_scope=None): - """ - Deletes all names that are ahead of the position, except for some special - objects like instances, where the position doesn't matter. - - :param position: the position as a line/column tuple, default is infinity. - """ - names = scope.get_defined_names() - # Instances have special rules, always return all the possible completions, - # because class variables are always valid and the `self.` variables, too. - if (not position or isinstance(scope, (Array, Instance)) - or start_scope != scope - and isinstance(start_scope, (parsing.Function, Execution))): - return names - names_new = [] - for n in names: - if n.start_pos[0] is not None and n.start_pos < position: - names_new.append(n) - return names_new - - -def get_names_for_scope(scope, position=None, star_search=True, - include_builtin=True): - """ - Get all completions possible for the current scope. - The star search option is only here to provide an optimization. Otherwise - the whole thing would probably start a little recursive madness. - """ - in_func_scope = scope - non_flow = scope.get_parent_until(parsing.Flow, reverse=True, - include_current=True) - while scope: - # `parsing.Class` is used, because the parent is never `Class`. - # Ignore the Flows, because the classes and functions care for that. - # InstanceElement of Class is ignored, if it is not the start scope. - if not (scope != non_flow and scope.isinstance(parsing.Class) - or scope.isinstance(parsing.Flow) - or scope.isinstance(Instance) - and non_flow.isinstance(Function) - ): - try: - if isinstance(scope, Instance): - for g in scope.scope_generator(): - yield g - else: - yield scope, get_defined_names_for_position(scope, - position, in_func_scope) - except StopIteration: - raise MultiLevelStopIteration('StopIteration raised somewhere') - if scope.isinstance(parsing.ForFlow) and scope.is_list_comp: - # is a list comprehension - yield scope, scope.get_set_vars(is_internal_call=True) - - scope = scope.parent - # This is used, because subscopes (Flow scopes) would distort the - # results. - if scope and scope.isinstance(Function, parsing.Function, Execution): - in_func_scope = scope - - # Add star imports. - if star_search: - for s in imports.remove_star_imports(non_flow.get_parent_until()): - for g in get_names_for_scope(s, star_search=False): - yield g - - # Add builtins to the global scope. - if include_builtin: - builtin_scope = builtin.Builtin.scope - yield builtin_scope, builtin_scope.get_defined_names() - - -def get_scopes_for_name(scope, name_str, position=None, search_global=False, - is_goto=False): - """ - This is the search function. The most important part to debug. - `remove_statements` and `filter_statements` really are the core part of - this completion. 
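`get_names_for_scope` above walks the `parent` chain from the innermost scope outwards and finally appends the builtins, mirroring Python's own LEGB lookup order. A simplified sketch of that rule with a hypothetical helper (not jedi code):

def lookup(name, scopes):
    # `scopes` is ordered innermost -> outermost; each maps names to values.
    for scope in scopes:
        if name in scope:
            return scope[name]
    raise NameError(name)

print(lookup("x", [{"y": 1}, {"x": 2}, {"x": 3}]))   # 2 -- the nearest scope wins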
- - :param position: Position of the last statement -> tuple of line, column - :return: List of Names. Their parents are the scopes, they are defined in. - :rtype: list - """ - def remove_statements(result): - """ - This is the part where statements are being stripped. - - Due to lazy evaluation, statements like a = func; b = a; b() have to be - evaluated. - """ - res_new = [] - for r in result: - add = [] - if r.isinstance(parsing.Statement): - check_instance = None - if isinstance(r, InstanceElement) and r.is_class_var: - check_instance = r.instance - r = r.var - - # Global variables handling. - if r.is_global(): - for token_name in r.token_list[1:]: - if isinstance(token_name, parsing.Name): - add = get_scopes_for_name(r.parent, - str(token_name)) - else: - # generated objects are used within executions, but these - # objects are in functions, and we have to dynamically - # execute first. - if isinstance(r, parsing.Param): - func = r.parent - # Instances are typically faked, if the instance is not - # called from outside. Here we check it for __init__ - # functions and return. - if isinstance(func, InstanceElement) \ - and func.instance.is_generated \ - and hasattr(func, 'name') \ - and str(func.name) == '__init__' \ - and r.position_nr > 0: # 0 would be self - r = func.var.params[r.position_nr] - - # add docstring knowledge - doc_params = docstrings.follow_param(r) - if doc_params: - res_new += doc_params - continue - - if not r.is_generated: - res_new += dynamic.search_params(r) - if not r.assignment_details: - # this means that there are no default params, - # so just ignore it. - continue - - scopes = follow_statement(r, seek_name=name_str) - add += remove_statements(scopes) - - if check_instance is not None: - # class renames - add = [InstanceElement(check_instance, a, True) - if isinstance(a, (Function, parsing.Function)) - else a for a in add] - res_new += add - else: - if isinstance(r, parsing.Class): - r = Class(r) - elif isinstance(r, parsing.Function): - r = Function(r) - if r.isinstance(Function): - try: - r = r.get_decorated_func() - except DecoratorNotFound: - continue - res_new.append(r) - debug.dbg('sfn remove, new: %s, old: %s' % (res_new, result)) - return res_new - - def filter_name(scope_generator): - """ - Filters all variables of a scope (which are defined in the - `scope_generator`), until the name fits. - """ - def handle_for_loops(loop): - # Take the first statement (for has always only - # one, remember `in`). And follow it. - if not len(loop.inits): - return [] - result = get_iterator_types(follow_statement(loop.inits[0])) - if len(loop.set_vars) > 1: - var_arr = loop.set_stmt.get_assignment_calls() - result = assign_tuples(var_arr, result, name_str) - return result - - def process(name): - """ - Returns the parent of a name, which means the element which stands - behind a name. - """ - result = [] - no_break_scope = False - par = name.parent - - if par.isinstance(parsing.Flow): - if par.command == 'for': - result += handle_for_loops(par) - else: - debug.warning('Flow: Why are you here? %s' % par.command) - elif par.isinstance(parsing.Param) \ - and par.parent is not None \ - and par.parent.parent.isinstance(parsing.Class) \ - and par.position_nr == 0: - # This is where self gets added - this happens at another - # place, if the var_args are clear. But sometimes the class is - # not known. Therefore add a new instance for self. Otherwise - # take the existing. 
- if isinstance(scope, InstanceElement): - inst = scope.instance - else: - inst = Instance(Class(par.parent.parent)) - inst.is_generated = True - result.append(inst) - elif par.isinstance(parsing.Statement): - def is_execution(arr): - for a in arr: - a = a[0] # rest is always empty with assignees - if a.isinstance(parsing.Array): - if is_execution(a): - return True - elif a.isinstance(parsing.Call): - if a.name == name and a.execution: - return True - return False - - is_exe = False - for op, assignee in par.assignment_details: - is_exe |= is_execution(assignee) - if is_exe: - # filter array[3] = ... - # TODO check executions for dict contents - pass - else: - details = par.assignment_details - if details and details[0][0] != '=': - no_break_scope = True - - # TODO this makes self variables non-breakable. wanted? - if isinstance(name, InstanceElement) \ - and not name.is_class_var: - no_break_scope = True - - result.append(par) - else: - result.append(par) - return result, no_break_scope - - flow_scope = scope - result = [] - # compare func uses the tuple of line/indent = line/column - comparison_func = lambda name: (name.start_pos) - - for nscope, name_list in scope_generator: - break_scopes = [] - # here is the position stuff happening (sorting of variables) - for name in sorted(name_list, key=comparison_func, reverse=True): - p = name.parent.parent if name.parent else None - if isinstance(p, InstanceElement) \ - and isinstance(p.var, parsing.Class): - p = p.var - if name_str == name.get_code() and p not in break_scopes: - r, no_break_scope = process(name) - if is_goto: - if r: - # Directly assign the name, but there has to be a - # result. - result.append(name) - else: - result += r - # for comparison we need the raw class - s = nscope.base if isinstance(nscope, Class) else nscope - # this means that a definition was found and is not e.g. - # in if/else. - if result and not no_break_scope: - if not name.parent or p == s: - break - break_scopes.append(p) - - while flow_scope: - # TODO check if result is in scope -> no evaluation necessary - n = dynamic.check_flow_information(flow_scope, name_str, - position) - if n: - result = n - break - - if result: - break - if flow_scope == nscope: - break - flow_scope = flow_scope.parent - flow_scope = nscope - if result: - break - - if not result and isinstance(nscope, Instance): - # getattr() / __getattr__ / __getattribute__ - result += check_getattr(nscope, name_str) - debug.dbg('sfn filter "%s" in %s: %s' % (name_str, nscope, result)) - return result - - def descriptor_check(result): - """ Processes descriptors """ - res_new = [] - for r in result: - if isinstance(scope, (Instance, Class)) \ - and hasattr(r, 'get_descriptor_return'): - # handle descriptors - try: - res_new += r.get_descriptor_return(scope) - continue - except KeyError: - pass - res_new.append(r) - return res_new - - if search_global: - scope_generator = get_names_for_scope(scope, position=position) - else: - if isinstance(scope, Instance): - scope_generator = scope.scope_generator() - else: - if isinstance(scope, (Class, parsing.Module)): - # classes are only available directly via chaining? - # strange stuff... - names = scope.get_defined_names() - else: - names = get_defined_names_for_position(scope, position) - scope_generator = iter([(scope, names)]) - - if is_goto: - return filter_name(scope_generator) - return descriptor_check(remove_statements(filter_name(scope_generator))) - - -def check_getattr(inst, name_str): - result = [] - # str is important to lose the NamePart! 
- name = parsing.Call(str(name_str), parsing.Call.STRING, (0, 0), inst) - args = helpers.generate_param_array([name]) - try: - result = inst.execute_subscope_by_name('__getattr__', args) - except KeyError: - pass - if not result: - # this is a little bit special. `__getattribute__` is executed - # before anything else. But: I know no use case, where this - # could be practical and the jedi would return wrong types. If - # you ever have something, let me know! - try: - result = inst.execute_subscope_by_name('__getattribute__', args) - except KeyError: - pass - return result - - -def get_iterator_types(inputs): - """ Returns the types of any iterator (arrays, yields, __iter__, etc). """ - iterators = [] - # Take the first statement (for has always only - # one, remember `in`). And follow it. - for it in inputs: - if isinstance(it, (Generator, Array, dynamic.ArrayInstance)): - iterators.append(it) - else: - if not hasattr(it, 'execute_subscope_by_name'): - debug.warning('iterator/for loop input wrong', it) - continue - try: - iterators += it.execute_subscope_by_name('__iter__') - except KeyError: - debug.warning('iterators: No __iter__ method found.') - - result = [] - for gen in iterators: - if isinstance(gen, Array): - # Array is a little bit special, since this is an internal - # array, but there's also the list builtin, which is - # another thing. - result += gen.get_index_types() - elif isinstance(gen, Instance): - # __iter__ returned an instance. - name = '__next__' if is_py3k else 'next' - try: - result += gen.execute_subscope_by_name(name) - except KeyError: - debug.warning('Instance has no __next__ function', gen) - else: - # is a generator - result += gen.iter_content() - return result - - -def assign_tuples(tup, results, seek_name): - """ - This is a normal assignment checker. In python functions and other things - can return tuples: - >>> a, b = 1, "" - >>> a, (b, c) = 1, ("", 1.0) - - Here, if seek_name is "a", the number type will be returned. - The first part (before `=`) is the param tuples, the second one result. - - :type tup: parsing.Array - """ - def eval_results(index): - types = [] - for r in results: - if hasattr(r, "get_exact_index_types"): - try: - types += r.get_exact_index_types(index) - except IndexError: - pass - else: - debug.warning("invalid tuple lookup %s of result %s in %s" - % (tup, results, seek_name)) - - return types - - result = [] - if tup.type == parsing.Array.NOARRAY: - # Here we have unnessecary braces, which we just remove. - arr = tup.get_only_subelement() - if type(arr) == parsing.Call: - if arr.name.names[-1] == seek_name: - result = results - else: - result = assign_tuples(arr, results, seek_name) - else: - for i, t in enumerate(tup): - # Used in assignments. There is just one call and no other things, - # therefore we can just assume, that the first part is important. - if len(t) != 1: - raise AttributeError('Array length should be 1') - t = t[0] - - # Check the left part, if there are still tuples in it or a Call. - if isinstance(t, parsing.Array): - # These are "sub"-tuples. - result += assign_tuples(t, eval_results(i), seek_name) - else: - if t.name.names[-1] == seek_name: - result += eval_results(i) - return result - - -@helpers.RecursionDecorator -@memoize_default(default=[]) -def follow_statement(stmt, seek_name=None): - """ - The starting point of the completion. A statement always owns a call list, - which are the calls, that a statement does. 
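`check_getattr` above emulates the runtime rule that `__getattr__` is only consulted when normal attribute lookup fails. A plain-Python illustration of that fallback (not jedi code):

class Proxy(object):
    def __init__(self, wrapped):
        self._wrapped = wrapped

    def __getattr__(self, name):
        # Only reached when normal lookup on Proxy fails.
        return getattr(self._wrapped, name)

p = Proxy([1, 2, 3])
p.append(4)          # falls through to the wrapped list's bound method
print(p._wrapped)    # [1, 2, 3, 4]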
- In case multiple names are defined in the statement, `seek_name` returns - the result for this name. - - :param stmt: A `parsing.Statement`. - :param seek_name: A string. - """ - debug.dbg('follow_stmt %s (%s)' % (stmt, seek_name)) - call_list = stmt.get_assignment_calls() - debug.dbg('calls: %s' % call_list) - - try: - result = follow_call_list(call_list) - except AttributeError: - # This is so evil! But necessary to propagate errors. The attribute - # errors here must not be catched, because they shouldn't exist. - raise MultiLevelAttributeError(sys.exc_info()) - - # Assignment checking is only important if the statement defines multiple - # variables. - if len(stmt.get_set_vars()) > 1 and seek_name and stmt.assignment_details: - new_result = [] - for op, set_vars in stmt.assignment_details: - new_result += assign_tuples(set_vars, result, seek_name) - result = new_result - return set(result) - - -def follow_call_list(call_list): - """ - The call_list has a special structure. - This can be either `parsing.Array` or `list of list`. - It is used to evaluate a two dimensional object, that has calls, arrays and - operators in it. - """ - def evaluate_list_comprehension(lc, parent=None): - input = lc.input - nested_lc = lc.input.token_list[0] - if isinstance(nested_lc, parsing.ListComprehension): - # is nested LC - input = nested_lc.stmt - loop = parsing.ForFlow([input], lc.stmt.start_pos, - lc.middle, True) - if parent is None: - loop.parent = lc.stmt.parent - else: - loop.parent = parent - - if isinstance(nested_lc, parsing.ListComprehension): - loop = evaluate_list_comprehension(nested_lc, loop) - return loop - - if parsing.Array.is_type(call_list, parsing.Array.TUPLE, - parsing.Array.DICT): - # Tuples can stand just alone without any braces. These would be - # recognized as separate calls, but actually are a tuple. - result = follow_call(call_list) - else: - result = [] - for calls in call_list: - calls_iterator = iter(calls) - for call in calls_iterator: - if parsing.Array.is_type(call, parsing.Array.NOARRAY): - result += follow_call_list(call) - elif isinstance(call, parsing.ListComprehension): - loop = evaluate_list_comprehension(call) - stmt = copy.copy(call.stmt) - stmt.parent = loop - # create a for loop which does the same as list - # comprehensions - result += follow_statement(stmt) - else: - # With things like params, these can also be functions... - if isinstance(call, (Function, Class, Instance, - dynamic.ArrayInstance)): - result.append(call) - # The string tokens are just operations (+, -, etc.) - elif not isinstance(call, (str, unicode)): - if str(call.name) == 'if': - # Ternary operators. - while True: - try: - call = next(calls_iterator) - except StopIteration: - break - try: - if str(call.name) == 'else': - break - except AttributeError: - pass - continue - result += follow_call(call) - elif call == '*': - if [r for r in result if isinstance(r, Array) - or isinstance(r, Instance) - and str(r.name) == 'str']: - # if it is an iterable, ignore * operations - next(calls_iterator) - return set(result) - - -def follow_call(call): - """ Follow a call is following a function, variable, string, etc. 
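`follow_call_list` above rewrites a list comprehension into an equivalent `ForFlow`, which works because a comprehension and a for loop that appends produce the same values. For example (plain Python, illustration only):

squares_comprehension = [x * x for x in range(5)]

squares_loop = []
for x in range(5):
    squares_loop.append(x * x)

assert squares_comprehension == squares_loop   # both are [0, 1, 4, 9, 16]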
""" - scope = call.parent_stmt.parent - path = call.generate_call_path() - position = call.parent_stmt.start_pos - return follow_call_path(path, scope, position) - - -def follow_call_path(path, scope, position): - """ Follows a path generated by `parsing.Call.generate_call_path()` """ - current = next(path) - - if isinstance(current, parsing.Array): - result = [Array(current)] - else: - if not isinstance(current, parsing.NamePart): - if current.type in (parsing.Call.STRING, parsing.Call.NUMBER): - t = type(current.name).__name__ - scopes = get_scopes_for_name(builtin.Builtin.scope, t) - else: - debug.warning('unknown type:', current.type, current) - scopes = [] - # Make instances of those number/string objects. - arr = helpers.generate_param_array([current.name]) - scopes = [Instance(s, arr) for s in scopes] - else: - # This is the first global lookup. - scopes = get_scopes_for_name(scope, current, position=position, - search_global=True) - result = imports.strip_imports(scopes) - - return follow_paths(path, result, scope, position=position) - - -def follow_paths(path, results, call_scope, position=None): - """ - In each result, `path` must be followed. Copies the path iterator. - """ - results_new = [] - if results: - if len(results) > 1: - iter_paths = itertools.tee(path, len(results)) - else: - iter_paths = [path] - - for i, r in enumerate(results): - fp = follow_path(iter_paths[i], r, call_scope, position=position) - if fp is not None: - results_new += fp - else: - # This means stop iteration. - return results - return results_new - - -def follow_path(path, scope, call_scope, position=None): - """ - Uses a generator and tries to complete the path, e.g. - >>> foo.bar.baz - - `follow_path` is only responsible for completing `.bar.baz`, the rest is - done in the `follow_call` function. - """ - # Current is either an Array or a Scope. - try: - current = next(path) - except StopIteration: - return None - debug.dbg('follow %s in scope %s' % (current, scope)) - - result = [] - if isinstance(current, parsing.Array): - # This must be an execution, either () or []. - if current.type == parsing.Array.LIST: - if hasattr(scope, 'get_index_types'): - result = scope.get_index_types(current) - elif current.type not in [parsing.Array.DICT]: - # Scope must be a class or func - make an instance or execution. - debug.dbg('exe', scope) - result = Execution(scope, current).get_return_types() - else: - # Curly braces are not allowed, because they make no sense. - debug.warning('strange function call with {}', current, scope) - else: - # The function must not be decorated with something else. - if scope.isinstance(Function): - scope = scope.get_magic_method_scope() - else: - # This is the typical lookup while chaining things. 
- if filter_private_variable(scope, call_scope, current): - return [] - result = imports.strip_imports(get_scopes_for_name(scope, current, - position=position)) - return follow_paths(path, set(result), call_scope, position=position) - - -def filter_private_variable(scope, call_scope, var_name): - if isinstance(var_name, (str, unicode)) \ - and var_name.startswith('__') and isinstance(scope, Instance): - s = call_scope.get_parent_until((parsing.Class, Instance), - include_current=True) - if s != scope and s != scope.base.base: - return True - return False - - -def goto(stmt, call_path=None): - if call_path is None: - arr = stmt.get_assignment_calls() - call = arr.get_only_subelement() - call_path = list(call.generate_call_path()) - - scope = stmt.parent - pos = stmt.start_pos - call_path, search = call_path[:-1], call_path[-1] - if call_path: - scopes = follow_call_path(iter(call_path), scope, pos) - search_global = False - pos = None - else: - scopes = [scope] - search_global = True - follow_res = [] - for s in scopes: - follow_res += get_scopes_for_name(s, search, pos, - search_global=search_global, is_goto=True) - return follow_res, search diff --git a/common/.local/lib/python2.7/site-packages/jedi/helpers.py b/common/.local/lib/python2.7/site-packages/jedi/helpers.py deleted file mode 100644 index b1f6e4c..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi/helpers.py +++ /dev/null @@ -1,266 +0,0 @@ -import copy -import contextlib - -import parsing -import evaluate -import debug -import builtin -import settings - - -class RecursionDecorator(object): - """ - A decorator to detect recursions in statements. In a recursion a statement - at the same place, in the same module may not be executed two times. - """ - def __init__(self, func): - self.func = func - self.reset() - - def __call__(self, stmt, *args, **kwargs): - #print stmt, len(self.node_statements()) - if self.push_stmt(stmt): - return [] - else: - result = self.func(stmt, *args, **kwargs) - self.pop_stmt() - return result - - def push_stmt(self, stmt): - self.current = RecursionNode(stmt, self.current) - if self._check_recursion(): - debug.warning('catched recursion', stmt) - self.pop_stmt() - return True - return False - - def pop_stmt(self): - if self.current is not None: - # I don't know how current can be None, but sometimes it happens - # with Python3. - self.current = self.current.parent - - def _check_recursion(self): - test = self.current - while True: - test = test.parent - if self.current == test: - return True - if not test: - return False - - def reset(self): - self.top = None - self.current = None - - def node_statements(self): - result = [] - n = self.current - while n: - result.insert(0, n.stmt) - n = n.parent - return result - - -class RecursionNode(object): - """ A node of the RecursionDecorator. """ - def __init__(self, stmt, parent): - self.script = stmt.get_parent_until() - self.position = stmt.start_pos - self.parent = parent - self.stmt = stmt - - # Don't check param instances, they are not causing recursions - # The same's true for the builtins, because the builtins are really - # simple. - self.is_ignored = isinstance(stmt, parsing.Param) \ - or (self.script == builtin.Builtin.scope) - - def __eq__(self, other): - if not other: - return None - return self.script == other.script \ - and self.position == other.position \ - and not self.is_ignored and not other.is_ignored - - -class ExecutionRecursionDecorator(object): - """ - Catches recursions of executions. - It is designed like a Singelton. 
Only one instance should exist. - """ - def __init__(self, func): - self.func = func - self.reset() - - def __call__(self, execution, evaluate_generator=False): - debug.dbg('Execution recursions: %s' % execution, self.recursion_level, - self.execution_count, len(self.execution_funcs)) - if self.check_recursion(execution, evaluate_generator): - result = [] - else: - result = self.func(execution, evaluate_generator) - self.cleanup() - return result - - @classmethod - def cleanup(cls): - cls.parent_execution_funcs.pop() - cls.recursion_level -= 1 - - @classmethod - def check_recursion(cls, execution, evaluate_generator): - in_par_execution_funcs = execution.base in cls.parent_execution_funcs - in_execution_funcs = execution.base in cls.execution_funcs - cls.recursion_level += 1 - cls.execution_count += 1 - cls.execution_funcs.add(execution.base) - cls.parent_execution_funcs.append(execution.base) - - if cls.execution_count > settings.max_executions: - return True - - if isinstance(execution.base, (evaluate.Generator, evaluate.Array)): - return False - module = execution.get_parent_until() - if evaluate_generator or module == builtin.Builtin.scope: - return False - - if in_par_execution_funcs: - if cls.recursion_level > settings.max_function_recursion_level: - return True - if in_execution_funcs and \ - len(cls.execution_funcs) > settings.max_until_execution_unique: - return True - if cls.execution_count > settings.max_executions_without_builtins: - return True - return False - - @classmethod - def reset(cls): - cls.recursion_level = 0 - cls.parent_execution_funcs = [] - cls.execution_funcs = set() - cls.execution_count = 0 - - -def fast_parent_copy(obj): - """ - Much, much faster than copy.deepcopy, but just for certain elements. - """ - new_elements = {} - - def recursion(obj): - new_obj = copy.copy(obj) - new_elements[obj] = new_obj - - items = new_obj.__dict__.items() - for key, value in items: - # replace parent (first try _parent and then parent) - if key in ['parent', '_parent', '_parent_stmt'] \ - and value is not None: - if key == 'parent' and '_parent' in items: - # parent can be a property - continue - try: - setattr(new_obj, key, new_elements[value]) - except KeyError: - pass - elif key in ['parent_stmt', 'parent_function']: - continue - elif isinstance(value, list): - setattr(new_obj, key, list_rec(value)) - elif isinstance(value, (parsing.Simple, parsing.Call)): - setattr(new_obj, key, recursion(value)) - return new_obj - - def list_rec(list_obj): - copied_list = list_obj[:] # lists, tuples, strings, unicode - for i, el in enumerate(copied_list): - if isinstance(el, (parsing.Simple, parsing.Call)): - copied_list[i] = recursion(el) - elif isinstance(el, list): - copied_list[i] = list_rec(el) - return copied_list - return recursion(obj) - - -def generate_param_array(args_tuple, parent_stmt=None): - """ This generates an array, that can be used as a param. """ - values = [] - for arg in args_tuple: - if arg is None: - values.append([]) - else: - values.append([arg]) - pos = None - arr = parsing.Array(pos, parsing.Array.TUPLE, parent_stmt, values=values) - evaluate.faked_scopes.append(arr) - return arr - - -def scan_array_for_pos(arr, pos): - """ - Returns the function Call that match search_name in an Array. - Makes changes to arr! 
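Both recursion decorators above follow the same basic pattern: remember what is currently being evaluated and return a harmless default when the same work item shows up again. A much simplified, hypothetical sketch of that pattern (not the jedi implementation):

def recursion_guard(default=None):
    active = set()

    def decorator(func):
        def wrapper(key, *args, **kwargs):
            if key in active:
                return default            # recursion detected -> cut it off
            active.add(key)
            try:
                return func(key, *args, **kwargs)
            finally:
                active.discard(key)
        return wrapper
    return decorator

@recursion_guard(default=0)
def depth(node):
    # Deliberately self-recursive; the guard stops the nested call.
    return 1 + depth(node)

print(depth("a"))   # 1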
- """ - def check_arr_index(): - positions = arr.arr_el_pos - for index, comma_pos in enumerate(positions): - if pos < comma_pos: - return index - return len(positions) - - call = None - stop = False - for sub in arr.values: - call = None - for s in sub: - if isinstance(s, parsing.Array): - new = scan_array_for_pos(s, pos) - if new[0] is not None: - call, index, stop = new - if stop: - return call, index, stop - elif isinstance(s, parsing.Call): - start_s = s - # check parts of calls - while s is not None: - if s.start_pos >= pos: - return call, check_arr_index(), stop - elif s.execution is not None: - end = s.execution.end_pos - if s.execution.start_pos < pos and \ - (end is None or pos < end): - c, index, stop = scan_array_for_pos( - s.execution, pos) - if stop: - return c, index, stop - - # call should return without execution and - # next - reset = c or s - if reset.execution.type not in \ - [parsing.Array.TUPLE, - parsing.Array.NOARRAY]: - return start_s, index, False - - reset.execution = None - reset.next = None - return c or start_s, index, True - s = s.next - - # The third return is just necessary for recursion inside, because - # it needs to know when to stop iterating. - return call, check_arr_index(), stop - - -@contextlib.contextmanager -def scale_speed_settings(factor): - a = settings.max_executions - b = settings.max_until_execution_unique - settings.max_executions *= factor - settings.max_until_execution_unique *= factor - yield - settings.max_executions = a - settings.max_until_execution_unique = b diff --git a/common/.local/lib/python2.7/site-packages/jedi/imports.py b/common/.local/lib/python2.7/site-packages/jedi/imports.py deleted file mode 100644 index 03a76ca..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi/imports.py +++ /dev/null @@ -1,332 +0,0 @@ -from __future__ import with_statement - -import os -import pkgutil -import imp -import sys -import time - -import builtin -import modules -import debug -import parsing -import evaluate -import itertools -import settings - -# for debugging purposes only -imports_processed = 0 - -star_import_cache = {} - - -class ModuleNotFound(Exception): - pass - - -class ImportPath(parsing.Base): - """ - An ImportPath is the path of a `parsing.Import` object. 
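`scale_speed_settings` above temporarily raises two limits and then restores them. The same idea as a generic helper, written with `contextlib` and a `try/finally` so the old value is restored even if the body raises (hypothetical helper, not jedi code):

import contextlib

@contextlib.contextmanager
def temporary_attr(obj, attr, value):
    old = getattr(obj, attr)
    setattr(obj, attr, value)
    try:
        yield
    finally:
        setattr(obj, attr, old)   # restore even on exceptions

class Settings(object):
    max_executions = 250

with temporary_attr(Settings, "max_executions", 250 * 10):
    print(Settings.max_executions)   # 2500
print(Settings.max_executions)       # 250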
- """ - class _GlobalNamespace(object): - def get_defined_names(self): - return [] - - def get_imports(self): - return [] - - @property - def start_pos(self): - return (0, 0) - - def get_parent_until(self): - return None - - GlobalNamespace = _GlobalNamespace() - - def __init__(self, import_stmt, is_like_search=False, kill_count=0, - direct_resolve=False): - self.import_stmt = import_stmt - self.is_like_search = is_like_search - self.direct_resolve = direct_resolve - self.is_partial_import = bool(kill_count) - path = import_stmt.get_parent_until().path - self.file_path = os.path.dirname(path) if path is not None else None - - # rest is import_path resolution - self.import_path = [] - if import_stmt.from_ns: - self.import_path += import_stmt.from_ns.names - if import_stmt.namespace: - if self.is_nested_import() and not direct_resolve: - self.import_path.append(import_stmt.namespace.names[0]) - else: - self.import_path += import_stmt.namespace.names - - for i in range(kill_count + int(is_like_search)): - self.import_path.pop() - - def __repr__(self): - return '<%s: %s>' % (type(self).__name__, self.import_stmt) - - def is_nested_import(self): - """ - This checks for the special case of nested imports, without aliases and - from statement: - >>> import foo.bar - """ - return not self.import_stmt.alias and not self.import_stmt.from_ns \ - and len(self.import_stmt.namespace.names) > 1 \ - and not self.direct_resolve - - def get_nested_import(self, parent): - """ - See documentation of `self.is_nested_import`. - Generates an Import statement, that can be used to fake nested imports. - """ - i = self.import_stmt - # This is not an existing Import statement. Therefore, set position to - # 0 (0 is not a valid line number). - zero = (0, 0) - n = parsing.Name(i.namespace.names[1:], zero, zero, self.import_stmt) - new = parsing.Import(zero, zero, n) - new.parent = parent - evaluate.faked_scopes.append(new) - debug.dbg('Generated a nested import: %s' % new) - return new - - def get_defined_names(self, on_import_stmt=False): - names = [] - for scope in self.follow(): - if scope is ImportPath.GlobalNamespace: - if self.import_stmt.relative_count == 0: - names += self.get_module_names() - - if self.file_path is not None: - path = os.path.abspath(self.file_path) - for i in range(self.import_stmt.relative_count - 1): - path = os.path.dirname(path) - names += self.get_module_names([path]) - else: - if on_import_stmt and isinstance(scope, parsing.Module) \ - and scope.path.endswith('__init__.py'): - pkg_path = os.path.dirname(scope.path) - names += self.get_module_names([pkg_path]) - for s, scope_names in evaluate.get_names_for_scope(scope, - include_builtin=False): - for n in scope_names: - if self.import_stmt.from_ns is None \ - or self.is_partial_import: - # from_ns must be defined to access module - # values plus a partial import means that there - # is something after the import, which - # automatically implies that there must not be - # any non-module scope. - continue - names.append(n) - return names - - def get_module_names(self, search_path=None): - """ - Get the names of all modules in the search_path. This means file names - and not names defined in the files. 
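`get_module_names` below leans on `pkgutil.iter_modules`, which enumerates importable module names on a list of search paths without importing anything. For example (illustration only):

import pkgutil

# No argument means "scan sys.path"; pass a list of directories to restrict it.
names = sorted(name for _, name, _ in pkgutil.iter_modules())
print(names[:5])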
- """ - if not search_path: - search_path = self.sys_path_with_modifications() - names = [] - for module_loader, name, is_pkg in pkgutil.iter_modules(search_path): - inf_pos = (float('inf'), float('inf')) - names.append(parsing.Name([(name, inf_pos)], inf_pos, inf_pos, - self.import_stmt)) - return names - - def sys_path_with_modifications(self): - module = self.import_stmt.get_parent_until() - return modules.sys_path_with_modifications(module) - - def follow(self, is_goto=False): - """ - Returns the imported modules. - """ - if evaluate.follow_statement.push_stmt(self.import_stmt): - # check recursion - return [] - - if self.import_path: - try: - scope, rest = self._follow_file_system() - except ModuleNotFound: - debug.warning('Module not found: ' + str(self.import_stmt)) - evaluate.follow_statement.pop_stmt() - return [] - - scopes = [scope] - scopes += itertools.chain.from_iterable( - remove_star_imports(s) for s in scopes) - - # follow the rest of the import (not FS -> classes, functions) - if len(rest) > 1 or rest and self.is_like_search: - scopes = [] - elif rest: - if is_goto: - scopes = itertools.chain.from_iterable( - evaluate.get_scopes_for_name(s, rest[0], is_goto=True) - for s in scopes) - else: - scopes = itertools.chain.from_iterable( - evaluate.follow_path(iter(rest), s, s) - for s in scopes) - scopes = list(scopes) - - if self.is_nested_import(): - scopes.append(self.get_nested_import(scope)) - else: - scopes = [ImportPath.GlobalNamespace] - debug.dbg('after import', scopes) - - evaluate.follow_statement.pop_stmt() - return scopes - - def _follow_file_system(self): - """ - Find a module with a path (of the module, like usb.backend.libusb10). - """ - def follow_str(ns, string): - debug.dbg('follow_module', ns, string) - path = None - if ns: - path = ns[1] - elif self.import_stmt.relative_count: - module = self.import_stmt.get_parent_until() - path = os.path.abspath(module.path) - for i in range(self.import_stmt.relative_count): - path = os.path.dirname(path) - - global imports_processed - imports_processed += 1 - if path is not None: - return imp.find_module(string, [path]) - else: - debug.dbg('search_module', string, self.file_path) - # Override the sys.path. It works only good that way. - # Injecting the path directly into `find_module` did not work. - sys.path, temp = sys_path_mod, sys.path - try: - i = imp.find_module(string) - except ImportError: - sys.path = temp - raise - sys.path = temp - return i - - if self.file_path: - sys_path_mod = list(self.sys_path_with_modifications()) - sys_path_mod.insert(0, self.file_path) - else: - sys_path_mod = list(builtin.get_sys_path()) - - current_namespace = None - # now execute those paths - rest = [] - for i, s in enumerate(self.import_path): - try: - current_namespace = follow_str(current_namespace, s) - except ImportError: - if current_namespace: - rest = self.import_path[i:] - else: - raise ModuleNotFound( - 'The module you searched has not been found') - - sys_path_mod.pop(0) # TODO why is this here? 
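`follow_str` above drives `imp.find_module`, which locates a module on an explicit path list without importing it. On modern Python the same lookup is spelled with `importlib` (illustration, not part of this code):

import importlib.util

spec = importlib.util.find_spec("json")
print(spec.origin)                        # filesystem path of the module, when it has one
print(spec.submodule_search_locations)    # non-None only for packages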
- path = current_namespace[1] - is_package_directory = current_namespace[2][2] == imp.PKG_DIRECTORY - - f = None - if is_package_directory or current_namespace[0]: - # is a directory module - if is_package_directory: - path += '/__init__.py' - with open(path) as f: - source = f.read() - else: - source = current_namespace[0].read() - current_namespace[0].close() - if path.endswith('.py'): - f = modules.Module(path, source) - else: - f = builtin.Parser(path=path) - else: - f = builtin.Parser(name=path) - - return f.parser.module, rest - - -def strip_imports(scopes): - """ - Here we strip the imports - they don't get resolved necessarily. - Really used anymore? Merge with remove_star_imports? - """ - result = [] - for s in scopes: - if isinstance(s, parsing.Import): - result += ImportPath(s).follow() - else: - result.append(s) - return result - - -def cache_star_import(func): - def wrapper(scope, *args, **kwargs): - try: - mods = star_import_cache[scope] - if mods[0] + settings.star_import_cache_validity > time.time(): - return mods[1] - except KeyError: - pass - # cache is too old and therefore invalid or not available - invalidate_star_import_cache(scope) - mods = func(scope, *args, **kwargs) - star_import_cache[scope] = time.time(), mods - - return mods - return wrapper - - -def invalidate_star_import_cache(module, only_main=False): - """ Important if some new modules are being reparsed """ - try: - t, mods = star_import_cache[module] - - del star_import_cache[module] - - for m in mods: - invalidate_star_import_cache(m, only_main=True) - except KeyError: - pass - - if not only_main: - # We need a list here because otherwise the list is being changed - # during the iteration in py3k: iteritems -> items. - for key, (t, mods) in list(star_import_cache.items()): - if module in mods: - invalidate_star_import_cache(key) - - -@cache_star_import -def remove_star_imports(scope, ignored_modules=[]): - """ - Check a module for star imports: - >>> from module import * - - and follow these modules. - """ - modules = strip_imports(i for i in scope.get_imports() if i.star) - new = [] - for m in modules: - if m not in ignored_modules: - new += remove_star_imports(m, modules) - modules += new - - # Filter duplicate modules. 
- return set(modules) diff --git a/common/.local/lib/python2.7/site-packages/jedi/keywords.py b/common/.local/lib/python2.7/site-packages/jedi/keywords.py deleted file mode 100644 index 950270b..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi/keywords.py +++ /dev/null @@ -1,86 +0,0 @@ -import keyword - -from _compatibility import is_py3k -import builtin - -import pydoc -try: - from pydoc_data import topics as pydoc_topics -except ImportError: - # Python 2.6 - try: - import pydoc_topics - except ImportError: - # Python 2.5 - pydoc_topics = None - -if is_py3k: - keys = keyword.kwlist -else: - keys = keyword.kwlist + ['None', 'False', 'True'] - - -def get_keywords(string='', pos=(0, 0), all=False): - if all: - return set([Keyword(k, pos) for k in keys]) - if string in keys: - return set([Keyword(string, pos)]) - return set() - - -def get_operator(string, pos): - return Keyword(string, pos) - - -class Keyword(object): - def __init__(self, name, pos): - self.name = name - self.start_pos = pos - self.parent = builtin.Builtin.scope - - def get_parent_until(self): - return self.parent - - @property - def names(self): - """ For a `parsing.Name` like comparision """ - return [self.name] - - @property - def docstr(self): - return imitate_pydoc(self.name) - - def __repr__(self): - return '<%s: %s>' % (type(self).__name__, self.name) - - -def imitate_pydoc(string): - """ - It's not possible to get the pydoc's without starting the annoying pager - stuff. - """ - # str needed because of possible unicode stuff in py2k (pydoc doesn't work - # with unicode strings) - string = str(string) - h = pydoc.help - try: - # try to access symbols - string = h.symbols[string] - string, _, related = string.partition(' ') - except KeyError: - pass - - get_target = lambda s: h.topics.get(s, h.keywords.get(s)) - while isinstance(string, str): - string = get_target(string) - - try: - # is a tuple now - label, related = string - except TypeError: - return '' - - try: - return pydoc_topics.topics[label] if pydoc_topics else '' - except KeyError: - return '' diff --git a/common/.local/lib/python2.7/site-packages/jedi/mixin/_functools.pym b/common/.local/lib/python2.7/site-packages/jedi/mixin/_functools.pym deleted file mode 100644 index be8470f..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi/mixin/_functools.pym +++ /dev/null @@ -1,9 +0,0 @@ -class partial(): - def __init__(self, func, *args, **keywords): - self.__func = func - self.__args = args - self.__keywords = keywords - - def __call__(self, *args, **kwargs): - # I know this doesn't work in Python, but Jedi can this ;-) - return self.__func(*self.__args, *args, **self.keywords, **kwargs) diff --git a/common/.local/lib/python2.7/site-packages/jedi/mixin/_sre.pym b/common/.local/lib/python2.7/site-packages/jedi/mixin/_sre.pym deleted file mode 100644 index 2b6935b..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi/mixin/_sre.pym +++ /dev/null @@ -1,99 +0,0 @@ -def compile(): - class SRE_Match(): - endpos = 1 - lastgroup = 0 - lastindex = 1 - pos = 0 - string = 'a' - regs = ((0, 1),) - - def __init__(self, pattern): - self.re = pattern - - def start(self): - return 0 - - def end(self): - return 1 - - def span(self): - return 0, 1 - - def expand(self): - return '' - - def group(self): - return '' - - def groupdict(self): - return {'a', 'a'} - - def groups(self): - return ('a',) - - class SRE_Pattern(): - flags = 0 - groupindex = {} - groups = 0 - pattern = 'a' - - def findall(self): - """ - findall(string[, pos[, endpos]]) --> list. 
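The `partial` mixin above only needs to model how stored arguments are combined with call-time arguments; the real `functools.partial` behaves like this (illustration):

from functools import partial

def add(a, b):
    return a + b

add_one = partial(add, 1)    # stores func=add, args=(1,)
print(add_one(41))           # 42 -- stored args come first, then call args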
- Return a list of all non-overlapping matches of pattern in string. - """ - return ['a'] - - def finditer(self): - """ - finditer(string[, pos[, endpos]]) --> iterator. - Return an iterator over all non-overlapping matches for the - RE pattern in string. For each match, the iterator returns a - match object. - """ - yield SRE_Match(self) - - def match(self): - """ - match(string[, pos[, endpos]]) --> match object or None. - Matches zero or more characters at the beginning of the string - pattern - """ - return SRE_Match(self) - - def scanner(self): - pass - - def search(self): - """ - search(string[, pos[, endpos]]) --> match object or None. - Scan through string looking for a match, and return a corresponding - MatchObject instance. Return None if no position in the string matches. - """ - return SRE_Match(self) - - def split(self): - """ - split(string[, maxsplit = 0]) --> list. - Split string by the occurrences of pattern. - """ - return ['a'] - - def sub(self): - """ - sub(repl, string[, count = 0]) --> newstring - Return the string obtained by replacing the leftmost non-overlapping - occurrences of pattern in string by the replacement repl. - """ - return '' - - def subn(self): - """ - subn(repl, string[, count = 0]) --> (newstring, number of subs) - Return the tuple (new_string, number_of_subs_made) found by replacing - the leftmost non-overlapping occurrences of pattern with the - replacement repl. - """ - return ('', 1) - - return SRE_Pattern() diff --git a/common/.local/lib/python2.7/site-packages/jedi/mixin/_weakref.pym b/common/.local/lib/python2.7/site-packages/jedi/mixin/_weakref.pym deleted file mode 100644 index 05eab2c..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi/mixin/_weakref.pym +++ /dev/null @@ -1,8 +0,0 @@ -def proxy(object, callback=None): - return object - -class ref(): - def __init__(self, object, callback=None): - self.__object = object - def __call__(self): - return self.__object diff --git a/common/.local/lib/python2.7/site-packages/jedi/mixin/builtins.pym b/common/.local/lib/python2.7/site-packages/jedi/mixin/builtins.pym deleted file mode 100644 index 28ddedf..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi/mixin/builtins.pym +++ /dev/null @@ -1,218 +0,0 @@ -""" -Pure Python implementation of some builtins. -This code is not going to be executed anywhere. -These implementations are not always correct, but should work as good as -possible for the auto completion. -""" - - -def next(iterator, default=None): - if hasattr("next"): - return iterator.next() - else: - return iterator.__next__() - return default - - -def iter(collection, sentinel=None): - if sentinel: - yield collection() - else: - for c in collection: - yield c - - -def range(start, stop=None, step=1): - return [0] - - -class xrange(): - # Attention: this function doesn't exist in Py3k (there it is range). 
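The `property`, `staticmethod` and `classmethod` stubs that follow only need `__get__`/`__set__` because that is the whole descriptor protocol: attribute access on a class instance is routed through those hooks. A minimal descriptor (plain Python, not part of the mixins):

class Constant(object):
    def __init__(self, value):
        self.value = value

    def __get__(self, obj, owner=None):
        return self.value        # attribute access is routed through here

class C(object):
    answer = Constant(42)

print(C().answer)   # 42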
- def __iter__(self): - yield 1 - - def count(self): - return 1 - - def index(self): - return 1 - - -#-------------------------------------------------------- -# descriptors -#-------------------------------------------------------- -class property(): - def __init__(self, fget, fset=None, fdel=None, doc=None): - self.fget = fget - self.fset = fset - self.fdel = fdel - self.__doc__ = doc - - def __get__(self, obj, cls): - return self.fget(obj) - - def __set__(self, obj, value): - self.fset(obj, value) - - def __delete__(self, obj): - self.fdel(obj) - - def setter(self, func): - self.fset = func - return self - - def getter(self, func): - self.fget = func - return self - - def deleter(self, func): - self.fdel = func - return self - - -class staticmethod(): - def __init__(self, func): - self.__func = func - - def __get__(self, obj, cls): - return self.__func - - -class classmethod(): - def __init__(self, func): - self.__func = func - - def __get__(self, obj, cls): - def _method(*args, **kwargs): - return self.__func(cls, *args, **kwargs) - return _method - - -#-------------------------------------------------------- -# array stuff -#-------------------------------------------------------- -class list(): - def __init__(self, iterable=[]): - self.__iterable = [] - for i in iterable: - self.__iterable += [i] - - def __iter__(self): - for i in self.__iterable: - yield i - - def __getitem__(self, y): - return self.__iterable[y] - - def pop(self): - return self.__iterable[-1] - - -class tuple(): - def __init__(self, iterable=[]): - self.__iterable = [] - for i in iterable: - self.__iterable += [i] - - def __iter__(self): - for i in self.__iterable: - yield i - - def __getitem__(self, y): - return self.__iterable[y] - - def index(self): - return 1 - - def count(self): - return 1 - - -class set(): - def __init__(self, iterable=[]): - self.__iterable = iterable - - def __iter__(self): - for i in self.__iterable: - yield i - - def pop(self): - return self.__iterable.pop() - - def copy(self): - return self - - def difference(self, other): - return self - other - - def intersection(self, other): - return self & other - - def symmetric_difference(self, other): - return self ^ other - - def union(self, other): - return self | other - - -class frozenset(): - def __init__(self, iterable=[]): - self.__iterable = iterable - - def __iter__(self): - for i in self.__iterable: - yield i - - def copy(self): - return self - - -class dict(): - def __init__(self, **elements): - self.__elements = elements - - def clear(self): - # has a strange docstr - pass - - def get(self, k, d=None): - # TODO implement - try: - #return self.__elements[k] - pass - except KeyError: - return d - - -class reversed(): - def __init__(self, sequence): - self.__sequence = sequence - - def __iter__(self): - for i in self.__sequence: - yield i - - def __next__(self): - return next(self.__iter__()) - - def next(self): - return self.__next__() - - -#-------------------------------------------------------- -# basic types -#-------------------------------------------------------- -class int(): - def __init__(self, x, base=None): - pass - - -class str(): - def __init__(self, obj): - pass - -class object(): - def mro(): - """ mro() -> list - return a type's method resolution order """ - return [object] diff --git a/common/.local/lib/python2.7/site-packages/jedi/mixin/datetime.pym b/common/.local/lib/python2.7/site-packages/jedi/mixin/datetime.pym deleted file mode 100644 index 823ac5b..0000000 --- 
a/common/.local/lib/python2.7/site-packages/jedi/mixin/datetime.pym +++ /dev/null @@ -1,4 +0,0 @@ -class datetime(): - @staticmethod - def now(): - return datetime() diff --git a/common/.local/lib/python2.7/site-packages/jedi/mixin/posix.pym b/common/.local/lib/python2.7/site-packages/jedi/mixin/posix.pym deleted file mode 100644 index 4417f7c..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi/mixin/posix.pym +++ /dev/null @@ -1,5 +0,0 @@ -def getcwd(): - return '' - -def getcwdu(): - return '' diff --git a/common/.local/lib/python2.7/site-packages/jedi/modules.py b/common/.local/lib/python2.7/site-packages/jedi/modules.py deleted file mode 100644 index 2421e91..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi/modules.py +++ /dev/null @@ -1,312 +0,0 @@ -from __future__ import with_statement - -from _compatibility import exec_function - -import re -import tokenize -import sys -import os -import time - -import parsing -import builtin -import debug -import evaluate -import settings -import imports - - -class Module(builtin.CachedModule): - """ - Manages all files, that are parsed and caches them. - - :param path: The module path of the file. - :param source: The source code of the file. - """ - def __init__(self, path, source): - super(Module, self).__init__(path=path) - self.source = source - self._line_cache = None - - def _get_source(self): - """ Just one time """ - s = self.source - del self.source # memory efficiency - return s - - -class ModuleWithCursor(Module): - """ - Manages all files, that are parsed and caches them. - Important are the params source and path, one of them has to - be there. - - :param source: The source code of the file. - :param path: The module path of the file or None. - :param position: The position, the user is currently in. Only important \ - for the main file. - """ - def __init__(self, path, source, position): - super(ModuleWithCursor, self).__init__(path, source) - self.position = position - - # this two are only used, because there is no nonlocal in Python 2 - self._line_temp = None - self._relevant_temp = None - - self.source = source - self._part_parser = None - - @property - def parser(self): - """ get the parser lazy """ - if not self._parser: - try: - ts, parser = builtin.CachedModule.cache[self.path] - imports.invalidate_star_import_cache(parser.module) - - del builtin.CachedModule.cache[self.path] - except KeyError: - pass - # Call the parser already here, because it will be used anyways. - # Also, the position is here important (which will not be used by - # default), therefore fill the cache here. - self._parser = parsing.PyFuzzyParser(self.source, self.path, - self.position) - if self.path is not None: - builtin.CachedModule.cache[self.path] = time.time(), \ - self._parser - return self._parser - - def get_path_until_cursor(self): - """ Get the path under the cursor. 
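The `parser` property above keeps parsed modules in `builtin.CachedModule.cache`, keyed by path and stamped with the time they were parsed. The caching idea, reduced to a toy helper with hypothetical names (not the jedi implementation):

import time

_cache = {}   # path -> (timestamp, parse result)

def parse_cached(path, parse, max_age=10.0):
    entry = _cache.get(path)
    if entry is not None and time.time() - entry[0] < max_age:
        return entry[1]                  # still fresh -> reuse
    tree = parse(path)
    _cache[path] = (time.time(), tree)
    return tree

print(parse_cached("example.py", parse=lambda p: ("tree for", p)))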
""" - result = self._get_path_until_cursor() - self._start_cursor_pos = self._line_temp + 1, self._column_temp - return result - - def _get_path_until_cursor(self, start_pos=None): - def fetch_line(): - line = self.get_line(self._line_temp) - if self._is_first: - self._is_first = False - self._line_length = self._column_temp - line = line[:self._column_temp] - else: - self._line_length = len(line) - line = line + '\n' - # add lines with a backslash at the end - while 1: - self._line_temp -= 1 - last_line = self.get_line(self._line_temp) - if last_line and last_line[-1] == '\\': - line = last_line[:-1] + ' ' + line - else: - break - return line[::-1] - - self._is_first = True - if start_pos is None: - self._line_temp = self.position[0] - self._column_temp = self.position[1] - else: - self._line_temp, self._column_temp = start_pos - - open_brackets = ['(', '[', '{'] - close_brackets = [')', ']', '}'] - - gen = tokenize.generate_tokens(fetch_line) - string = '' - level = 0 - force_point = False - try: - for token_type, tok, start, end, line in gen: - #print 'tok', token_type, tok, force_point - if level > 0: - if tok in close_brackets: - level += 1 - if tok in open_brackets: - level -= 1 - elif tok == '.': - force_point = False - elif force_point: - # it is reversed, therefore a number is getting recognized - # as a floating point number - if token_type == tokenize.NUMBER and tok[0] == '.': - force_point = False - else: - break - elif tok in close_brackets: - level += 1 - elif token_type in [tokenize.NAME, tokenize.STRING]: - force_point = True - elif token_type == tokenize.NUMBER: - pass - else: - break - - self._column_temp = self._line_length - end[1] - string += tok - except tokenize.TokenError: - debug.warning("Tokenize couldn't finish", sys.exc_info) - - return string[::-1] - - def get_path_under_cursor(self): - """ - Return the path under the cursor. If there is a rest of the path left, - it will be added to the stuff before it. - """ - line = self.get_line(self.position[0]) - after = re.search("[\w\d]*", line[self.position[1]:]).group(0) - return self.get_path_until_cursor() + after - - def get_operator_under_cursor(self): - line = self.get_line(self.position[0]) - after = re.match("[^\w\s]+", line[self.position[1]:]) - before = re.match("[^\w\s]+", line[:self.position[1]][::-1]) - return (before.group(0) if before is not None else '') \ - + (after.group(0) if after is not None else '') - - def get_context(self): - pos = self._start_cursor_pos - while pos > (1, 0): - # remove non important white space - line = self.get_line(pos[0]) - while pos[1] > 0 and line[pos[1] - 1].isspace(): - pos = pos[0], pos[1] - 1 - - try: - yield self._get_path_until_cursor(start_pos=pos) - except StopIteration: - yield '' - pos = self._line_temp, self._column_temp - - while True: - yield '' - - def get_line(self, line_nr): - if not self._line_cache: - self._line_cache = self.source.split('\n') - - if line_nr == 0: - # This is a fix for the zeroth line. We need a newline there, for - # the backwards parser. - return '' - if line_nr < 0: - raise StopIteration() - try: - return self._line_cache[line_nr - 1] - except IndexError: - raise StopIteration() - - def get_part_parser(self): - """ Returns a parser that contains only part of the source code. This - exists only because of performance reasons. 
- """ - if self._part_parser: - return self._part_parser - - # TODO check for docstrings - length = settings.part_line_length - offset = max(self.position[0] - length, 0) - s = '\n'.join(self.source.split('\n')[offset:offset + length]) - self._part_parser = parsing.PyFuzzyParser(s, self.path, self.position, - line_offset=offset) - return self._part_parser - - -@evaluate.memoize_default([]) -def sys_path_with_modifications(module): - def execute_code(code): - c = "import os; from os.path import *; result=%s" - variables = {'__file__': module.path} - try: - exec_function(c % code, variables) - except Exception: - debug.warning('sys path detected, but failed to evaluate') - return None - try: - res = variables['result'] - if isinstance(res, str): - return os.path.abspath(res) - else: - return None - except KeyError: - return None - - def check_module(module): - try: - possible_stmts = module.used_names['path'] - except KeyError: - return builtin.get_sys_path() - - sys_path = list(builtin.get_sys_path()) # copy - for p in possible_stmts: - try: - call = p.get_assignment_calls().get_only_subelement() - except AttributeError: - continue - n = call.name - if not isinstance(n, parsing.Name) or len(n.names) != 3: - continue - if n.names[:2] != ('sys', 'path'): - continue - array_cmd = n.names[2] - if call.execution is None: - continue - exe = call.execution - if not (array_cmd == 'insert' and len(exe) == 2 - or array_cmd == 'append' and len(exe) == 1): - continue - - if array_cmd == 'insert': - exe_type, exe.type = exe.type, parsing.Array.NOARRAY - exe_pop = exe.values.pop(0) - res = execute_code(exe.get_code()) - if res is not None: - sys_path.insert(0, res) - debug.dbg('sys path inserted: %s' % res) - exe.type = exe_type - exe.values.insert(0, exe_pop) - elif array_cmd == 'append': - res = execute_code(exe.get_code()) - if res is not None: - sys_path.append(res) - debug.dbg('sys path added: %s' % res) - return sys_path - - if module.path is None: - return [] # support for modules without a path is intentionally bad. - - curdir = os.path.abspath(os.curdir) - try: - os.chdir(os.path.dirname(module.path)) - except OSError: - pass - - result = check_module(module) - result += detect_django_path(module.path) - - # cleanup, back to old directory - os.chdir(curdir) - return result - - -def detect_django_path(module_path): - """ Detects the path of the very well known Django library (if used) """ - result = [] - while True: - new = os.path.dirname(module_path) - # If the module_path doesn't change anymore, we're finished -> / - if new == module_path: - break - else: - module_path = new - - try: - with open(module_path + os.path.sep + 'manage.py'): - debug.dbg('Found django path: %s' % module_path) - result.append(module_path) - except IOError: - pass - return result diff --git a/common/.local/lib/python2.7/site-packages/jedi/parsing.py b/common/.local/lib/python2.7/site-packages/jedi/parsing.py deleted file mode 100644 index d56f7ae..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi/parsing.py +++ /dev/null @@ -1,1855 +0,0 @@ -""" -Maintainer: David Halter -Version: 0.1 - -py_fuzzyparser parses python code, with the goal of a good representation of -the code within a tree structure. Variables, Classes and Functions are defined -within this tree structure, containing their exact locations in the code. -It is also a primary goal to work with code which contains syntax errors. - -This behaviour may be used to refactor, modify, search and complete code. 
- -To understand this code it is extremely important to understand the behaviour -of the python module 'tokenize'. - -This original codebase of this parser, which has been refactored and heavily -changed, was programmed by Aaron Griffin . - -**The structure of the following script:** -A Scope has - - imports (Import) - - subscopes (Scope, Class, Function, Flow) - - statements (Statement) - -All these objects have `Name`s. `Call` and `Array` are used as detail objects -of a statement. - -All those classes are being generated by PyFuzzyParser, which takes python text -as input and ignores just all the non-python stuff. Basically you could feed it -a perl script, and it should still work (which means throw no error. -""" -from _compatibility import (next, literal_eval, StringIO, - property, cleandoc, Python3Method) - -import tokenize -import re -import keyword -import os - -import debug - - -class ParserError(Exception): - pass - - -def indent_block(text, indention=' '): - """ This function indents a text block with a default of four spaces """ - temp = '' - while text and text[-1] == '\n': - temp += text[-1] - text = text[:-1] - lines = text.split('\n') - return '\n'.join(map(lambda s: indention + s, lines)) + temp - - -class PushBackIterator(object): - def __init__(self, iterator): - self.pushes = [] - self.iterator = iterator - - def push_back(self, value): - self.pushes.append(value) - - def __iter__(self): - return self - - def next(self): - """ Python 2 Compatibility """ - return self.__next__() - - def __next__(self): - if self.pushes: - return self.pushes.pop() - else: - return next(self.iterator) - - -class Base(object): - """ - This is just here to have an isinstance check, which is also used on - evaluate classes. But since they have sometimes a special type of - delegation, it is important for those classes to override this method. - - I know that there is a chance to do such things with __instancecheck__, but - since Python 2.5 doesn't support it, I decided to do it this way. - """ - def isinstance(self, *cls): - return isinstance(self, cls) - - -class Simple(Base): - """ - The super class for Scope, Import, Name and Statement. Every object in - the parser tree inherits from this class. - """ - def __init__(self, start_pos, end_pos=(None, None)): - self.start_pos = start_pos - self.end_pos = end_pos - self.parent = None - - @Python3Method - def get_parent_until(self, classes=(), reverse=False, - include_current=False): - """ Takes always the parent, until one class (not a Class) """ - if type(classes) not in (tuple, list): - classes = (classes,) - scope = self - while scope.parent is not None: - if classes and reverse != scope.isinstance(*classes): - if include_current: - return scope - break - scope = scope.parent - return scope - - def __repr__(self): - code = self.get_code().replace('\n', ' ') - return "<%s: %s@%s>" % \ - (type(self).__name__, code, self.start_pos[0]) - - -class Scope(Simple): - """ - Super class for the parser tree, which represents the state of a python - text file. - A Scope manages and owns its subscopes, which are classes and functions, as - well as variables and imports. It is used to access the structure of python - files. - - :param start_pos: The position (line and column) of the scope. - :type start_pos: tuple(int, int) - :param docstr: The docstring for the current Scope. 
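PushBackIterator above is what lets the parser "un-read" a token after looking one step too far. A short usage sketch, assuming the class exactly as defined above (example token values made up):

tokens = PushBackIterator(iter(['def', 'foo', '(', ')']))
first = next(tokens)        # 'def'
tokens.push_back(first)     # decide we were not ready for it yet
assert next(tokens) == 'def'
assert next(tokens) == 'foo'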
- :type docstr: str - """ - def __init__(self, start_pos, docstr=''): - super(Scope, self).__init__(start_pos) - self.subscopes = [] - self.imports = [] - self.statements = [] - self.docstr = docstr - self.asserts = [] - - def add_scope(self, sub, decorators): - sub.parent = self - sub.decorators = decorators - for d in decorators: - # the parent is the same, because the decorator has not the scope - # of the function - d.parent = sub.parent - self.subscopes.append(sub) - return sub - - def add_statement(self, stmt): - """ - Used to add a Statement or a Scope. - A statement would be a normal command (Statement) or a Scope (Flow). - """ - stmt.parent = self - self.statements.append(stmt) - return stmt - - def add_docstr(self, string): - """ Clean up a docstring """ - self.docstr = cleandoc(literal_eval(string)) - - def add_import(self, imp): - self.imports.append(imp) - imp.parent = self - - def get_imports(self): - """ Gets also the imports within flow statements """ - i = [] + self.imports - for s in self.statements: - if isinstance(s, Scope): - i += s.get_imports() - return i - - def get_code(self, first_indent=False, indention=' '): - """ - :return: Returns the code of the current scope. - :rtype: str - """ - string = "" - if len(self.docstr) > 0: - string += '"""' + self.docstr + '"""\n' - for i in self.imports: - string += i.get_code() - for sub in self.subscopes: - string += sub.get_code(first_indent=True, indention=indention) - for stmt in self.statements: - string += stmt.get_code() - - if first_indent: - string = indent_block(string, indention=indention) - return string - - @Python3Method - def get_set_vars(self): - """ - Get all the names, that are active and accessible in the current - scope. - - :return: list of Name - :rtype: list - """ - n = [] - for stmt in self.statements: - try: - n += stmt.get_set_vars(True) - except TypeError: - n += stmt.get_set_vars() - - # function and class names - n += [s.name for s in self.subscopes] - - for i in self.imports: - if not i.star: - n += i.get_defined_names() - return n - - def get_defined_names(self): - return [n for n in self.get_set_vars() - if isinstance(n, Import) or len(n) == 1] - - def is_empty(self): - """ - :return: True if there are no subscopes, imports and statements. - :rtype: bool - """ - return not (self.imports or self.subscopes or self.statements) - - @Python3Method - def get_statement_for_position(self, pos): - checks = self.statements + self.asserts - if self.isinstance(Function): - checks += self.params + self.decorators + self.returns - for s in checks: - if isinstance(s, Flow): - p = s.get_statement_for_position(pos) - while s.next and not p: - s = s.next - p = s.get_statement_for_position(pos) - if p: - return p - elif s.start_pos <= pos < s.end_pos: - return s - - for s in self.subscopes: - if s.start_pos <= pos <= s.end_pos: - p = s.get_statement_for_position(pos) - if p: - return p - - def __repr__(self): - try: - name = self.path - except AttributeError: - try: - name = self.name - except AttributeError: - name = self.command - - return "<%s: %s@%s-%s>" % (type(self).__name__, name, - self.start_pos[0], self.end_pos[0]) - - -class Module(Scope): - """ - The top scope, which is always a module. 
- """ - def __init__(self, path, docstr=''): - super(Module, self).__init__((1, 0), docstr) - self.path = path - self.global_vars = [] - self._name = None - self.used_names = {} - self.temp_used_names = [] - - def add_global(self, name): - """ - Global means in these context a function (subscope) which has a global - statement. - This is only relevant for the top scope. - - :param name: The name of the global. - :type name: Name - """ - self.global_vars.append(name) - # set no parent here, because globals are not defined in this scope. - - def get_set_vars(self): - n = super(Module, self).get_set_vars() - n += self.global_vars - return n - - @property - def name(self): - """ This is used for the goto function. """ - if self._name is not None: - return self._name - if self.path is None: - string = '' # no path -> empty name - else: - sep = (re.escape(os.path.sep),) * 2 - r = re.search(r'([^%s]*?)(%s__init__)?(\.py|\.so)?$' % sep, - self.path) - string = r.group(1) - names = [(string, (0, 0))] - self._name = Name(names, self.start_pos, self.end_pos, self) - return self._name - - def is_builtin(self): - return not (self.path is None or self.path.endswith('.py')) - - -class Class(Scope): - """ - Used to store the parsed contents of a python class. - - :param name: The Class name. - :type name: string - :param supers: The super classes of a Class. - :type supers: list - :param start_pos: The start position (line, column) of the class. - :type start_pos: tuple(int, int) - :param docstr: The docstring for the current Scope. - :type docstr: str - """ - def __init__(self, name, supers, start_pos, docstr=''): - super(Class, self).__init__(start_pos, docstr) - self.name = name - name.parent = self - self.supers = supers - for s in self.supers: - s.parent = self - self.decorators = [] - - def get_code(self, first_indent=False, indention=' '): - string = "\n".join('@' + stmt.get_code() for stmt in self.decorators) - string += 'class %s' % (self.name) - if len(self.supers) > 0: - sup = ','.join(stmt.code for stmt in self.supers) - string += '(%s)' % sup - string += ':\n' - string += super(Class, self).get_code(True, indention) - if self.is_empty(): - string += "pass\n" - return string - - -class Function(Scope): - """ - Used to store the parsed contents of a python function. - - :param name: The Function name. - :type name: string - :param params: The parameters (Statement) of a Function. - :type params: list - :param start_pos: The start position (line, column) the Function. - :type start_pos: tuple(int, int) - :param docstr: The docstring for the current Scope. - :type docstr: str - """ - def __init__(self, name, params, start_pos, annotation): - Scope.__init__(self, start_pos) - self.name = name - name.parent = self - self.params = params - for p in params: - p.parent = self - p.parent_function = self - self.decorators = [] - self.returns = [] - self.is_generator = False - self.listeners = set() # not used here, but in evaluation. 
- - if annotation is not None: - annotation.parent = self - self.annotation = annotation - - def get_code(self, first_indent=False, indention=' '): - string = "\n".join('@' + stmt.get_code() for stmt in self.decorators) - params = ','.join([stmt.code for stmt in self.params]) - string += "def %s(%s):\n" % (self.name, params) - string += super(Function, self).get_code(True, indention) - if self.is_empty(): - string += "pass\n" - return string - - def get_set_vars(self): - n = super(Function, self).get_set_vars() - for p in self.params: - try: - n.append(p.get_name()) - except IndexError: - debug.warning("multiple names in param %s" % n) - return n - - def get_call_signature(self, width=72): - """ - Generate call signature of this function. - - :param width: Fold lines if a line is longer than this value. - :type width: int - - :rtype: str - """ - l = self.name.names[-1] + '(' - lines = [] - for (i, p) in enumerate(self.params): - code = p.get_code(False) - if i != len(self.params) - 1: - code += ', ' - if len(l + code) > width: - lines.append(l[:-1] if l[-1] == ' ' else l) - l = code - else: - l += code - if l: - lines.append(l) - lines[-1] += ')' - return '\n'.join(lines) - - @property - def doc(self): - """ Return a document string including call signature. """ - return '%s\n\n%s' % (self.get_call_signature(), self.docstr) - - -class Flow(Scope): - """ - Used to describe programming structure - flow statements, - which indent code, but are not classes or functions: - - - for - - while - - if - - try - - with - - Therefore statements like else, except and finally are also here, - they are now saved in the root flow elements, but in the next variable. - - :param command: The flow command, if, while, else, etc. - :type command: str - :param inits: The initializations of a flow -> while 'statement'. - :type inits: list(Statement) - :param start_pos: Position (line, column) of the Flow statement. - :type start_pos: tuple(int, int) - :param set_vars: Local variables used in the for loop (only there). - :type set_vars: list - """ - def __init__(self, command, inits, start_pos, set_vars=None): - self.next = None - self.command = command - super(Flow, self).__init__(start_pos, '') - self._parent = None - # These have to be statements, because of with, which takes multiple. - self.inits = inits - for s in inits: - s.parent = self - if set_vars is None: - self.set_vars = [] - else: - self.set_vars = set_vars - for s in self.set_vars: - s.parent.parent = self - s.parent = self - - @property - def parent(self): - return self._parent - - @parent.setter - def parent(self, value): - self._parent = value - if self.next: - self.next.parent = value - - def get_code(self, first_indent=False, indention=' '): - stmts = [] - for s in self.inits: - stmts.append(s.get_code(new_line=False)) - stmt = ', '.join(stmts) - string = "%s %s:\n" % (self.command, vars, stmt) - string += super(Flow, self).get_code(True, indention) - if self.next: - string += self.next.get_code() - return string - - def get_set_vars(self, is_internal_call=False): - """ - Get the names for the flow. This includes also a call to the super - class. - :param is_internal_call: defines an option for internal files to crawl\ - through this class. Normally it will just call its superiors, to\ - generate the output. 
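get_call_signature above folds the rendered parameter list onto a new line once it would pass `width` characters. The folding logic on its own, as a hypothetical helper with the same behaviour:

def fold_signature(name, params, width=72):
    """Render name(p1, p2, ...) and wrap onto new lines past `width` chars."""
    line = name + '('
    lines = []
    for i, code in enumerate(params):
        if i != len(params) - 1:
            code += ', '
        if len(line + code) > width:
            lines.append(line.rstrip())   # close the current line and start over
            line = code
        else:
            line += code
    if line:
        lines.append(line)
    lines[-1] += ')'
    return '\n'.join(lines)

# fold_signature('configure', ['host', 'port=80', 'timeout=None'], width=20)
# -> 'configure(host,\nport=80,\ntimeout=None)'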
- """ - if is_internal_call: - n = list(self.set_vars) - for s in self.inits: - n += s.set_vars - if self.next: - n += self.next.get_set_vars(is_internal_call) - n += super(Flow, self).get_set_vars() - return n - else: - return self.get_parent_until((Class, Function)).get_set_vars() - - def get_imports(self): - i = super(Flow, self).get_imports() - if self.next: - i += self.next.get_imports() - return i - - def set_next(self, next): - """ Set the next element in the flow, those are else, except, etc. """ - if self.next: - return self.next.set_next(next) - else: - self.next = next - self.next.parent = self.parent - return next - - -class ForFlow(Flow): - """ - Used for the for loop, because there are two statement parts. - """ - def __init__(self, inits, start_pos, set_stmt, is_list_comp=False): - super(ForFlow, self).__init__('for', inits, start_pos, - set_stmt.used_vars) - self.set_stmt = set_stmt - self.is_list_comp = is_list_comp - - def get_code(self, first_indent=False, indention=" " * 4): - vars = ",".join(x.get_code() for x in self.set_vars) - stmts = [] - for s in self.inits: - stmts.append(s.get_code(new_line=False)) - stmt = ', '.join(stmts) - s = "for %s in %s:\n" % (vars, stmt) - return s + super(Flow, self).get_code(True, indention) - - -class Import(Simple): - """ - Stores the imports of any Scopes. - - >>> 1+1 - 2 - - :param start_pos: Position (line, column) of the Import. - :type start_pos: tuple(int, int) - :param namespace: The import, can be empty if a star is given - :type namespace: Name - :param alias: The alias of a namespace(valid in the current namespace). - :type alias: Name - :param from_ns: Like the namespace, can be equally used. - :type from_ns: Name - :param star: If a star is used -> from time import *. - :type star: bool - :param defunct: An Import is valid or not. - :type defunct: bool - """ - def __init__(self, start_pos, end_pos, namespace, alias=None, - from_ns=None, star=False, relative_count=0, defunct=False): - super(Import, self).__init__(start_pos, end_pos) - - self.namespace = namespace - self.alias = alias - self.from_ns = from_ns - for n in [namespace, alias, from_ns]: - if n: - n.parent = self - - self.star = star - self.relative_count = relative_count - self.defunct = defunct - - def get_code(self, new_line=True): - # in case one of the names is None - alias = self.alias or '' - namespace = self.namespace or '' - from_ns = self.from_ns or '' - - if self.alias: - ns_str = "%s as %s" % (namespace, alias) - else: - ns_str = str(namespace) - - nl = '\n' if new_line else '' - if self.from_ns or self.relative_count: - if self.star: - ns_str = '*' - dots = '.' * self.relative_count - return "from %s%s import %s%s" % (dots, from_ns, ns_str, nl) - else: - return "import %s%s" % (ns_str, nl) - - def get_defined_names(self): - if self.defunct: - return [] - if self.star: - return [self] - if self.alias: - return [self.alias] - if len(self.namespace) > 1: - o = self.namespace - n = Name([(o.names[0], o.start_pos)], o.start_pos, o.end_pos, - parent=o.parent) - return [n] - else: - return [self.namespace] - - def get_set_vars(self): - return self.get_defined_names() - - def get_all_import_names(self): - n = [] - if self.from_ns: - n.append(self.from_ns) - if self.namespace: - n.append(self.namespace) - if self.alias: - n.append(self.alias) - return n - - -class Statement(Simple): - """ - This is the class for all the possible statements. 
Which means, this class - stores pretty much all the Python code, except functions, classes, imports, - and flow functions like if, for, etc. - - :param code: The full code of a statement. This is import, if one wants \ - to execute the code at some level. - :param code: str - :param set_vars: The variables which are defined by the statement. - :param set_vars: str - :param used_funcs: The functions which are used by the statement. - :param used_funcs: str - :param used_vars: The variables which are used by the statement. - :param used_vars: str - :param token_list: Token list which is also peppered with Name. - :param token_list: list - :param start_pos: Position (line, column) of the Statement. - :type start_pos: tuple(int, int) - """ - def __init__(self, code, set_vars, used_funcs, used_vars, token_list, - start_pos, end_pos): - super(Statement, self).__init__(start_pos, end_pos) - self.code = code - self.used_funcs = used_funcs - self.used_vars = used_vars - self.token_list = token_list - for s in set_vars + used_funcs + used_vars: - s.parent = self - self.set_vars = self._remove_executions_from_set_vars(set_vars) - - # cache - self._assignment_calls = None - self._assignment_details = None - # this is important for other scripts - self._assignment_calls_calculated = False - - def _remove_executions_from_set_vars(self, set_vars): - """ - Important mainly for assosiative arrays: - - >>> a = 3 - >>> b = {} - >>> b[a] = 3 - - `a` is in this case not a set_var, it is used to index the dict. - """ - - if not set_vars: - return set_vars - result = set(set_vars) - last = None - in_execution = 0 - for tok in self.token_list: - if isinstance(tok, Name): - if tok not in result: - break - if in_execution: - result.remove(tok) - elif isinstance(tok, tuple): - tok = tok[1] - if tok in ['(', '['] and isinstance(last, Name): - in_execution += 1 - elif tok in [')', ']'] and in_execution > 0: - in_execution -= 1 - last = tok - return list(result) - - def get_code(self, new_line=True): - if new_line: - return self.code + '\n' - else: - return self.code - - def get_set_vars(self): - """ Get the names for the statement. """ - return list(self.set_vars) - - @property - def assignment_details(self): - if self._assignment_details is None: - # normally, this calls sets this variable - self.get_assignment_calls() - # it may not have been set by get_assignment_calls -> just use an empty - # array - return self._assignment_details or [] - - def is_global(self): - # first keyword of the first token is global -> must be a global - return str(self.token_list[0]) == "global" - - def get_assignment_calls(self): - """ - This is not done in the main parser, because it might be slow and - most of the statements won't need this data anyway. This is something - 'like' a lazy execution. - - This is not really nice written, sorry for that. 
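_remove_executions_from_set_vars above is about exactly the `b[a] = 3` case from its docstring: `a` only indexes `b`, so it must not count as an assigned name. The standard library's ast module makes the same distinction visible (a different mechanism than jedi's token scan, shown only to make the point concrete):

import ast

tree = ast.parse('b[a] = 3')
assign = tree.body[0]
target = assign.targets[0]            # a Subscript node: b[...]
print(type(target).__name__)          # 'Subscript' -- the thing written to is b[...]
print(target.value.id)                # 'b'  (the container that is assigned into)
# 'a' only appears inside the subscript in a Load context, i.e. it is read, not set.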
If you plan to replace - it and make it nicer, that would be cool :-) - """ - if self._assignment_calls_calculated: - return self._assignment_calls - self._assignment_details = [] - top = result = Array(self.start_pos, Array.NOARRAY, self) - level = 0 - is_chain = False - close_brackets = False - - tok_iter = enumerate(self.token_list) - for i, tok_temp in tok_iter: - #print 'tok', tok_temp, result - if isinstance(tok_temp, ListComprehension): - result.add_to_current_field(tok_temp) - continue - try: - token_type, tok, start_pos = tok_temp - except TypeError: - # the token is a Name, which has already been parsed - tok = tok_temp - token_type = None - start_pos = tok.start_pos - except ValueError: - debug.warning("unkown value, shouldn't happen", - tok_temp, type(tok_temp)) - raise - else: - if tok in ['return', 'yield'] or level == 0 and \ - tok.endswith('=') and not tok in ['>=', '<=', '==', '!=']: - # This means, there is an assignment here. - - # Add assignments, which can be more than one - self._assignment_details.append((tok, top)) - # All these calls wouldn't be important if nonlocal would - # exist. -> Initialize the first item again. - result = Array(start_pos, Array.NOARRAY, self) - top = result - level = 0 - close_brackets = False - is_chain = False - continue - elif tok == 'as': - next(tok_iter, None) - continue - - brackets = {'(': Array.TUPLE, '[': Array.LIST, '{': Array.SET} - is_call = lambda: type(result) == Call - is_call_or_close = lambda: is_call() or close_brackets - - is_literal = token_type in [tokenize.STRING, tokenize.NUMBER] - if isinstance(tok, Name) or is_literal: - c_type = Call.NAME - if is_literal: - tok = literal_eval(tok) - if token_type == tokenize.STRING: - c_type = Call.STRING - elif token_type == tokenize.NUMBER: - c_type = Call.NUMBER - - if is_chain: - call = Call(tok, c_type, start_pos, parent=result) - result = result.set_next_chain_call(call) - is_chain = False - close_brackets = False - else: - if close_brackets: - result = result.parent - close_brackets = False - if type(result) == Call: - result = result.parent - call = Call(tok, c_type, start_pos, parent=result) - result.add_to_current_field(call) - result = call - elif tok in brackets.keys(): # brackets - level += 1 - if is_call_or_close(): - result = Array(start_pos, brackets[tok], parent=result) - result = result.parent.add_execution(result) - close_brackets = False - else: - result = Array(start_pos, brackets[tok], parent=result) - result.parent.add_to_current_field(result) - elif tok == ':': - while is_call_or_close(): - result = result.parent - close_brackets = False - if result.type == Array.LIST: # [:] lookups - result.add_to_current_field(tok) - else: - result.add_dictionary_key() - elif tok == '.': - if close_brackets and result.parent != top: - # only get out of the array, if it is a array execution - result = result.parent - close_brackets = False - is_chain = True - elif tok == ',': - while is_call_or_close(): - result = result.parent - close_brackets = False - result.add_field((start_pos[0], start_pos[1] + 1)) - # important - it cannot be empty anymore - if result.type == Array.NOARRAY: - result.type = Array.TUPLE - elif tok in [')', '}', ']']: - while is_call_or_close(): - result = result.parent - close_brackets = False - if tok == '}' and not len(result): - # this is a really special case - empty brackets {} are - # always dictionaries and not sets. 
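The surrounding loop recognises an assignment by seeing a token that ends in '=' (and is not a comparison) while the bracket level is zero; what precedes it becomes an assignment detail and what follows is parsed into the Call/Array tree. That level-zero test in isolation, using the stdlib tokenizer instead of jedi's token list (Python 3 shown):

import tokenize
from io import StringIO

def split_assignment(source):
    """Return (target_part, value_part) of a single statement, or None."""
    level = 0
    for tok_type, tok, start, _end, _line in tokenize.generate_tokens(StringIO(source).readline):
        if tok in ('(', '[', '{'):
            level += 1
        elif tok in (')', ']', '}'):
            level -= 1
        elif level == 0 and tok.endswith('=') and tok not in ('==', '!=', '<=', '>='):
            col = start[1]
            return source[:col].strip(), source[col + len(tok):].strip()
    return None

# split_assignment('d[k] = (a, b)') -> ('d[k]', '(a, b)')
# split_assignment('a == b')        -> None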
- result.type = Array.DICT - level -= 1 - result.end_pos = start_pos[0], start_pos[1] + 1 - close_brackets = True - else: - while is_call_or_close(): - result = result.parent - close_brackets = False - if tok != '\n': - result.add_to_current_field(tok) - - if level != 0: - debug.warning("Brackets don't match: %s." - "This is not normal behaviour." % level) - - self._assignment_calls_calculated = True - self._assignment_calls = top - return top - - -class Param(Statement): - """ - The class which shows definitions of params of classes and functions. - But this is not to define function calls. - """ - def __init__(self, code, set_vars, used_funcs, used_vars, - token_list, start_pos, end_pos): - super(Param, self).__init__(code, set_vars, used_funcs, used_vars, - token_list, start_pos, end_pos) - - # this is defined by the parser later on, not at the initialization - # it is the position in the call (first argument, second...) - self.position_nr = None - self.is_generated = False - self.annotation_stmt = None - self.parent_function = None - - def add_annotation(self, annotation_stmt): - annotation_stmt.parent = self - self.annotation_stmt = annotation_stmt - - def get_name(self): - """ get the name of the param """ - n = self.set_vars or self.used_vars - if len(n) > 1: - debug.warning("Multiple param names (%s)." % n) - return n[0] - - -class Call(Base): - """ - `Call` contains a call, e.g. `foo.bar` and owns the executions of those - calls, which are `Array`s. - """ - NAME = 1 - NUMBER = 2 - STRING = 3 - - def __init__(self, name, type, start_pos, parent_stmt=None, parent=None): - self.name = name - # parent is not the oposite of next. The parent of c: a = [b.c] would - # be an array. - self.parent = parent - self.type = type - self.start_pos = start_pos - - self.next = None - self.execution = None - self._parent_stmt = parent_stmt - - @property - def parent_stmt(self): - if self._parent_stmt is not None: - return self._parent_stmt - elif self.parent: - return self.parent.parent_stmt - else: - return None - - @parent_stmt.setter - def parent_stmt(self, value): - self._parent_stmt = value - - def set_next_chain_call(self, call): - """ Adds another part of the statement""" - self.next = call - call.parent = self.parent - return call - - def add_execution(self, call): - """ - An execution is nothing else than brackets, with params in them, which - shows access on the internals of this name. - """ - self.execution = call - # there might be multiple executions, like a()[0], in that case, they - # have the same parent. Otherwise it's not possible to parse proper. - if self.parent.execution == self: - call.parent = self.parent - else: - call.parent = self - return call - - def generate_call_path(self): - """ Helps to get the order in which statements are executed. """ - # TODO include previous nodes? As an option? 
- try: - for name_part in self.name.names: - yield name_part - except AttributeError: - yield self - if self.execution is not None: - for y in self.execution.generate_call_path(): - yield y - if self.next is not None: - for y in self.next.generate_call_path(): - yield y - - def get_code(self): - if self.type == Call.NAME: - s = self.name.get_code() - else: - s = repr(self.name) - if self.execution is not None: - s += '(%s)' % self.execution.get_code() - if self.next is not None: - s += self.next.get_code() - return s - - def __repr__(self): - return "<%s: %s>" % \ - (type(self).__name__, self.name) - - -class Array(Call): - """ - Describes the different python types for an array, but also empty - statements. In the Python syntax definitions this type is named 'atom'. - http://docs.python.org/py3k/reference/grammar.html - Array saves sub-arrays as well as normal operators and calls to methods. - - :param array_type: The type of an array, which can be one of the constants\ - below. - :type array_type: int - """ - NOARRAY = None - TUPLE = 'tuple' - LIST = 'list' - DICT = 'dict' - SET = 'set' - - def __init__(self, start_pos, arr_type=NOARRAY, parent_stmt=None, - parent=None, values=None): - super(Array, self).__init__(None, arr_type, start_pos, parent_stmt, - parent) - - self.values = values if values else [] - self.arr_el_pos = [] - self.keys = [] - self.end_pos = None - - def add_field(self, start_pos): - """ - Just add a new field to the values. - - Each value has a sub-array, because there may be different tokens in - one array. - """ - self.arr_el_pos.append(start_pos) - self.values.append([]) - - def add_to_current_field(self, tok): - """ Adds a token to the latest field (in content). """ - if not self.values: - # An empty round brace is just a tuple, filled it is unnecessary. - if self.type == Array.TUPLE: - self.type = Array.NOARRAY - # Add the first field, this is done here, because if nothing - # gets added, the list is empty, which is also needed sometimes. - self.values.append([]) - self.values[-1].append(tok) - - def add_dictionary_key(self): - """ - Only used for dictionaries, automatically adds the tokens added by now - from the values to keys, because the parser works this way. - """ - if self.type in (Array.LIST, Array.TUPLE): - return # these are basically code errors, just ignore - self.keys.append(self.values.pop()) - if self.type == Array.SET: - self.type = Array.DICT - self.values.append([]) - - def get_only_subelement(self): - """ - Returns the only element that an array contains. If it contains - more than one element, raise an exception. - """ - if len(self.values) != 1 or len(self.values[0]) != 1: - raise AttributeError("More than one value found") - return self.values[0][0] - - @staticmethod - def is_type(instance, *types): - """ - This is not only used for calls on the actual object, but for - ducktyping, to invoke this function with anything as `self`. 
- """ - if isinstance(instance, Array): - if instance.type in types: - return True - return False - - def __len__(self): - return len(self.values) - - def __getitem__(self, key): - return self.values[key] - - def __iter__(self): - if self.type == self.DICT: - return iter(zip(self.keys, self.values)) - else: - return iter(self.values) - - def get_code(self): - def to_str(el): - try: - return el.get_code() - except AttributeError: - return str(el) - - map = {Array.NOARRAY: '%s', - Array.TUPLE: '(%s)', - Array.LIST: '[%s]', - Array.DICT: '{%s}', - Array.SET: '{%s}' - } - inner = [] - for i, value in enumerate(self.values): - s = '' - try: - key = self.keys[i] - except IndexError: - pass - else: - for el in key[i]: - s += to_str(el) - for el in value: - s += to_str(el) - inner.append(s) - return map[self.type] % ', '.join(inner) - - def __repr__(self): - if self.type == self.NOARRAY: - typ = 'noarray' - else: - typ = self.type - return "<%s: %s%s>" % (type(self).__name__, typ, self.values) - - -class NamePart(str): - """ - A string. Sometimes it is important to know if the string belongs to a name - or not. - """ - def __new__(cls, s, start_pos): - self = super(NamePart, cls).__new__(cls, s) - self.start_pos = start_pos - return self - - @property - def end_pos(self): - return self.start_pos[0], self.start_pos[1] + len(self) - - -class Name(Simple): - """ - Used to define names in python. - Which means the whole namespace/class/function stuff. - So a name like "module.class.function" - would result in an array of [module, class, function] - """ - def __init__(self, names, start_pos, end_pos, parent=None): - super(Name, self).__init__(start_pos, end_pos) - self.names = tuple(n if isinstance(n, NamePart) else NamePart(*n) - for n in names) - if parent is not None: - self.parent = parent - - def get_code(self): - """ Returns the names in a full string format """ - return ".".join(self.names) - - def __str__(self): - return self.get_code() - - def __len__(self): - return len(self.names) - - -class ListComprehension(object): - """ Helper class for list comprehensions """ - def __init__(self, stmt, middle, input): - self.stmt = stmt - self.middle = middle - self.input = input - - def __repr__(self): - return "<%s: %s>" % \ - (type(self).__name__, self.get_code()) - - def get_code(self): - statements = self.stmt, self.middle, self.input - code = [s.get_code().replace('\n', '') for s in statements] - return "%s for %s in %s" % tuple(code) - - -class PyFuzzyParser(object): - """ - This class is used to parse a Python file, it then divides them into a - class structure of different scopes. - - :param code: The codebase for the parser. - :type code: str - :param user_position: The line/column, the user is currently on. - :type user_position: tuple(int, int) - """ - def __init__(self, code, module_path=None, user_position=None, - no_docstr=False, line_offset=0): - self.user_position = user_position - self.user_scope = None - self.user_stmt = None - self.code = code + '\n' # end with \n, because the parser needs it - self.no_docstr = no_docstr - - # initialize global Scope - self.module = Module(module_path) - self.scope = self.module - self.current = (None, None) - - # Stuff to fix tokenize errors. The parser is pretty good in tolerating - # any errors of tokenize and just parse ahead. 
- self._line_of_tokenize_restart = line_offset - - self.parse() - - # delete code again, only the parser needs it - del self.code - - def __repr__(self): - return "<%s: %s>" % (type(self).__name__, self.module) - - @property - def start_pos(self): - return (self._line_of_tokenize_restart + self._tokenize_start_pos[0], - self._tokenize_start_pos[1]) - - @property - def end_pos(self): - return (self._line_of_tokenize_restart + self._tokenize_end_pos[0], - self._tokenize_end_pos[1]) - - def _check_user_stmt(self, simple): - if not isinstance(simple, Param): - for tok_name in self.module.temp_used_names: - try: - self.module.used_names[tok_name].add(simple) - except KeyError: - self.module.used_names[tok_name] = set([simple]) - self.module.temp_used_names = [] - - if not self.user_position: - return - # the position is right - if simple.start_pos <= self.user_position <= simple.end_pos: - if self.user_stmt is not None: - # if there is already a user position (another import, because - # imports are splitted) the names are checked. - for n in simple.get_set_vars(): - if n.start_pos < self.user_position <= n.end_pos: - self.user_stmt = simple - else: - self.user_stmt = simple - - def _parsedotname(self, pre_used_token=None): - """ - The dot name parser parses a name, variable or function and returns - their names. - - :return: Tuple of Name, token_type, nexttoken. - :rtype: tuple(Name, int, str) - """ - def append(el): - names.append(el) - self.module.temp_used_names.append(el[0]) - - names = [] - if pre_used_token is None: - token_type, tok = self.next() - if token_type != tokenize.NAME and tok != '*': - return [], token_type, tok - else: - token_type, tok = pre_used_token - - if token_type != tokenize.NAME and tok != '*': - # token maybe a name or star - return None, token_type, tok - - append((tok, self.start_pos)) - first_pos = self.start_pos - while True: - token_type, tok = self.next() - if tok != '.': - break - token_type, tok = self.next() - if token_type != tokenize.NAME: - break - append((tok, self.start_pos)) - - n = Name(names, first_pos, self.end_pos) if names else None - return n, token_type, tok - - def _parseimportlist(self): - """ - The parser for the imports. Unlike the class and function parse - function, this returns no Import class, but rather an import list, - which is then added later on. - The reason, why this is not done in the same class lies in the nature - of imports. There are two ways to write them: - - - from ... import ... - - import ... - - To distinguish, this has to be processed after the parser. - - :return: List of imports. - :rtype: list - """ - imports = [] - brackets = False - continue_kw = [",", ";", "\n", ')'] \ - + list(set(keyword.kwlist) - set(['as'])) - while True: - defunct = False - token_type, tok = self.next() - if token_type == tokenize.ENDMARKER: - break - if brackets and tok == '\n': - self.next() - if tok == '(': # python allows only one `(` in the statement. - brackets = True - self.next() - i, token_type, tok = self._parsedotname(self.current) - if not i: - defunct = True - name2 = None - if tok == 'as': - name2, token_type, tok = self._parsedotname() - imports.append((i, name2, defunct)) - while tok not in continue_kw: - token_type, tok = self.next() - if not (tok == "," or brackets and tok == '\n'): - break - return imports - - def _parseparen(self): - """ - Functions and Classes have params (which means for classes - super-classes). They are parsed here and returned as Statements. 
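_parsedotname above reads one NAME token and then keeps consuming '.' NAME pairs until anything else appears. The same consumption pattern over the stdlib tokenizer (stand-alone sketch with an assumed name, Python 3 shown; not the parser's actual token plumbing):

import tokenize
from io import StringIO

def read_dotted_name(source):
    """Collect the leading dotted name of `source`."""
    gen = tokenize.generate_tokens(StringIO(source).readline)
    tok_type, tok = next(gen)[:2]
    if tok_type != tokenize.NAME:
        return ''
    parts = [tok]
    while True:
        tok_type, tok = next(gen)[:2]
        if tok != '.':
            break                     # anything but a dot ends the name
        tok_type, tok = next(gen)[:2]
        if tok_type != tokenize.NAME:
            break
        parts.append(tok)
    return '.'.join(parts)

# read_dotted_name('os.path.join(x)') -> 'os.path.join'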
- - :return: List of Statements - :rtype: list - """ - names = [] - tok = None - pos = 0 - breaks = [',', ':'] - while tok not in [')', ':']: - param, tok = self._parse_statement(added_breaks=breaks, - stmt_class=Param) - if param and tok == ':': - # parse annotations - annotation, tok = self._parse_statement(added_breaks=breaks) - if annotation: - param.add_annotation(annotation) - - # params without vars are usually syntax errors. - if param and (param.set_vars or param.used_vars): - param.position_nr = pos - names.append(param) - pos += 1 - - return names - - def _parsefunction(self): - """ - The parser for a text functions. Process the tokens, which follow a - function definition. - - :return: Return a Scope representation of the tokens. - :rtype: Function - """ - first_pos = self.start_pos - token_type, fname = self.next() - if token_type != tokenize.NAME: - return None - - fname = Name([(fname, self.start_pos)], self.start_pos, self.end_pos) - - token_type, open = self.next() - if open != '(': - return None - params = self._parseparen() - - token_type, colon = self.next() - annotation = None - if colon in ['-', '->']: - # parse annotations - if colon == '-': - # The Python 2 tokenizer doesn't understand this - token_type, colon = self.next() - if colon != '>': - return None - annotation, colon = self._parse_statement(added_breaks=[':']) - - if colon != ':': - return None - - # because of 2 line func param definitions - scope = Function(fname, params, first_pos, annotation) - if self.user_scope and scope != self.user_scope \ - and self.user_position > first_pos: - self.user_scope = scope - return scope - - def _parseclass(self): - """ - The parser for a text class. Process the tokens, which follow a - class definition. - - :return: Return a Scope representation of the tokens. - :rtype: Class - """ - first_pos = self.start_pos - token_type, cname = self.next() - if token_type != tokenize.NAME: - debug.warning("class: syntax err, token is not a name@%s (%s: %s)" - % (self.start_pos[0], tokenize.tok_name[token_type], cname)) - return None - - cname = Name([(cname, self.start_pos)], self.start_pos, self.end_pos) - - super = [] - token_type, next = self.next() - if next == '(': - super = self._parseparen() - token_type, next = self.next() - - if next != ':': - debug.warning("class syntax: %s@%s" % (cname, self.start_pos[0])) - return None - - # because of 2 line class initializations - scope = Class(cname, super, first_pos) - if self.user_scope and scope != self.user_scope \ - and self.user_position > first_pos: - self.user_scope = scope - return scope - - def _parse_statement(self, pre_used_token=None, added_breaks=None, - stmt_class=Statement, list_comp=False): - """ - Parses statements like: - - >>> a = test(b) - >>> a += 3 - 2 or b - - and so on. One line at a time. - - :param pre_used_token: The pre parsed token. - :type pre_used_token: set - :return: Statement + last parsed token. - :rtype: (Statement, str) - """ - - string = '' - set_vars = [] - used_funcs = [] - used_vars = [] - level = 0 # The level of parentheses - is_return = None - - if pre_used_token: - token_type, tok = pre_used_token - else: - token_type, tok = self.next() - - while token_type == tokenize.COMMENT: - # remove newline and comment - self.next() - token_type, tok = self.next() - - first_pos = self.start_pos - opening_brackets = ['{', '(', '['] - closing_brackets = ['}', ')', ']'] - - # the difference between "break" and "always break" is that the latter - # will even break in parentheses. 
This is true for typical flow - # commands like def and class and the imports, which will never be used - # in a statement. - breaks = ['\n', ':', ')'] - always_break = [';', 'import', 'from', 'class', 'def', 'try', 'except', - 'finally', 'while'] - if added_breaks: - breaks += added_breaks - - tok_list = [] - while not (tok in always_break or tok in breaks and level <= 0): - try: - set_string = None - #print 'parse_stmt', tok, tokenize.tok_name[token_type] - tok_list.append(self.current + (self.start_pos,)) - if tok == 'as': - string += " %s " % tok - token_type, tok = self.next() - if token_type == tokenize.NAME: - n, token_type, tok = self._parsedotname(self.current) - if n: - set_vars.append(n) - tok_list.append(n) - string += ".".join(n.names) - continue - elif token_type == tokenize.NAME: - if tok in ['return', 'yield', 'del', 'raise']: - if len(tok_list) > 1: - # this happens, when a statement has opening - # brackets, which are not closed again, here I just - # start a new statement. This is a hack, but I - # could not come up with a better solution. - # This is basically a reset of the statement. - debug.warning('keyword in statement %s@%s', - tok_list, self.start_pos[0]) - tok_list = [self.current + (self.start_pos,)] - set_vars = [] - used_funcs = [] - used_vars = [] - level = 0 - set_string = tok + ' ' - if tok in ['return', 'yield']: - is_return = tok - elif tok == 'for': - # list comprehensions! - middle, tok = self._parse_statement( - added_breaks=['in']) - if tok != 'in' or middle is None: - if middle is None: - level -= 1 - else: - middle.parent = self.scope - debug.warning('list comprehension formatting @%s' % - self.start_pos[0]) - continue - - b = [')', ']'] - in_clause, tok = self._parse_statement(added_breaks=b, - list_comp=True) - if tok not in b or in_clause is None: - middle.parent = self.scope - if in_clause is None: - self.gen.push_back(self._current_full) - else: - in_clause.parent = self.scope - in_clause.parent = self.scope - debug.warning('list comprehension in_clause %s@%s' - % (tok, self.start_pos[0])) - continue - other_level = 0 - - for i, tok in enumerate(reversed(tok_list)): - if not isinstance(tok, (Name, ListComprehension)): - tok = tok[1] - if tok in closing_brackets: - other_level -= 1 - elif tok in opening_brackets: - other_level += 1 - if other_level > 0: - break - else: - # could not detect brackets -> nested list comp - i = 0 - - tok_list, toks = tok_list[:-i], tok_list[-i:-1] - src = '' - for t in toks: - src += t[1] if isinstance(t, tuple) \ - else t.get_code() - st = Statement(src, [], [], [], - toks, first_pos, self.end_pos) - - for s in [st, middle, in_clause]: - s.parent = self.scope - tok = ListComprehension(st, middle, in_clause) - tok_list.append(tok) - if list_comp: - string = '' - string += tok.get_code() - continue - else: - n, token_type, tok = self._parsedotname(self.current) - # removed last entry, because we add Name - tok_list.pop() - if n: - tok_list.append(n) - if tok == '(': - # it must be a function - used_funcs.append(n) - else: - used_vars.append(n) - if string and re.match(r'[\w\d\'"]', string[-1]): - string += ' ' - string += ".".join(n.names) - continue - elif tok.endswith('=') and tok not in ['>=', '<=', '==', '!=']: - # there has been an assignement -> change vars - if level == 0: - set_vars += used_vars - used_vars = [] - elif tok in opening_brackets: - level += 1 - elif tok in closing_brackets: - level -= 1 - - string = set_string if set_string is not None else string + tok - token_type, tok = self.next() - except 
StopIteration: - # comes from tokenizer - break - - if not string: - return None, tok - #print 'new_stat', string, set_vars, used_funcs, used_vars - if self.freshscope and not self.no_docstr and len(tok_list) == 1 \ - and self.last_token[0] == tokenize.STRING: - self.scope.add_docstr(self.last_token[1]) - return None, tok - else: - stmt = stmt_class(string, set_vars, used_funcs, used_vars, - tok_list, first_pos, self.end_pos) - self._check_user_stmt(stmt) - if is_return: - # add returns to the scope - func = self.scope.get_parent_until(Function) - if is_return == 'yield': - func.is_generator = True - try: - func.returns.append(stmt) - except AttributeError: - debug.warning('return in non-function') - - if tok in always_break: - self.gen.push_back(self._current_full) - return stmt, tok - - def next(self): - return self.__next__() - - def __iter__(self): - return self - - def __next__(self): - """ Generate the next tokenize pattern. """ - try: - self._current_full = next(self.gen) - except tokenize.TokenError: - # We just ignore this error, I try to handle it earlier - as - # good as possible - debug.warning('parentheses not closed error') - except IndentationError: - # This is an error, that tokenize may produce, because the code - # is not indented as it should. Here it just ignores this line - # and restarts the parser. - # (This is a rather unlikely error message, for normal code, - # tokenize seems to be pretty tolerant) - debug.warning('indentation error on line %s, ignoring it' % - (self.start_pos[0])) - self._line_of_tokenize_restart = self.start_pos[0] + 1 - self.gen = PushBackIterator(tokenize.generate_tokens( - self.buf.readline)) - return self.next() - except StopIteration: - # set end_pos correctly, if we finish - s = self.scope - while s is not None: - s.end_pos = self.end_pos - s = s.parent - raise - - type, tok, self._tokenize_start_pos, self._tokenize_end_pos, \ - self.parserline = self._current_full - if self.user_position and (self.start_pos[0] == self.user_position[0] - or self.user_scope is None - and self.start_pos[0] >= self.user_position[0]): - debug.dbg('user scope found [%s] = %s' % \ - (self.parserline.replace('\n', ''), repr(self.scope))) - self.user_scope = self.scope - self.last_token = self.current - self.current = (type, tok) - return self.current - - def parse(self): - """ - The main part of the program. It analyzes the given code-text and - returns a tree-like scope. For a more detailed description, see the - class description. - - :param text: The code which should be parsed. - :param type: str - - :raises: IndentationError - """ - self.buf = StringIO(self.code) - self.gen = PushBackIterator(tokenize.generate_tokens( - self.buf.readline)) - - extended_flow = ['else', 'elif', 'except', 'finally'] - statement_toks = ['{', '[', '(', '`'] - - decorators = [] - self.freshscope = True - self.iterator = iter(self) - # This iterator stuff is not intentional. It grew historically. - for token_type, tok in self.iterator: - self.module.temp_used_names = [] - #debug.dbg('main: tok=[%s] type=[%s] indent=[%s]'\ - # % (tok, tokenize.tok_name[token_type], start_position[0])) - - while token_type == tokenize.DEDENT and self.scope != self.module: - token_type, tok = self.next() - if self.start_pos[1] <= self.scope.start_pos[1]: - self.scope.end_pos = self.start_pos - self.scope = self.scope.parent - - # check again for unindented stuff. this is true for syntax - # errors. only check for names, because thats relevant here. 
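__next__ above keeps the parser alive when tokenize gives up: a TokenError (typically unclosed brackets) is ignored and an IndentationError restarts tokenization on the following line. A reduced sketch of the ignore-TokenError half (stand-alone, Python 3 shown, not the parser's exact recovery):

import tokenize
from io import StringIO

def tolerant_tokens(source):
    """Yield tokens and quietly end the stream on tokenize errors instead of raising."""
    gen = tokenize.generate_tokens(StringIO(source).readline)
    while True:
        try:
            yield next(gen)
        except tokenize.TokenError:
            return          # e.g. EOF inside an unclosed '(' -- just stop
        except StopIteration:
            return

# Unclosed bracket: plain tokenize would raise, this just ends the stream.
broken = 'value = compute(1, 2\n'
names = [tok for tok_type, tok, _, _, _ in tolerant_tokens(broken)
         if tok_type == tokenize.NAME]
# names == ['value', 'compute']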
If - # some docstrings are not indented, I don't care. - while self.start_pos[1] <= self.scope.start_pos[1] \ - and (token_type == tokenize.NAME or tok in ['(', '['])\ - and self.scope != self.module: - self.scope.end_pos = self.start_pos - self.scope = self.scope.parent - - first_pos = self.start_pos - if tok == 'def': - func = self._parsefunction() - if func is None: - debug.warning("function: syntax error@%s" % - self.start_pos[0]) - continue - self.freshscope = True - self.scope = self.scope.add_scope(func, decorators) - decorators = [] - elif tok == 'class': - cls = self._parseclass() - if cls is None: - debug.warning("class: syntax error@%s" % self.start_pos[0]) - continue - self.freshscope = True - self.scope = self.scope.add_scope(cls, decorators) - decorators = [] - # import stuff - elif tok == 'import': - imports = self._parseimportlist() - for m, alias, defunct in imports: - i = Import(first_pos, self.end_pos, m, alias, - defunct=defunct) - self._check_user_stmt(i) - self.scope.add_import(i) - if not imports: - i = Import(first_pos, self.end_pos, None, defunct=True) - self._check_user_stmt(i) - self.freshscope = False - elif tok == 'from': - defunct = False - # take care for relative imports - relative_count = 0 - while 1: - token_type, tok = self.next() - if tok != '.': - break - relative_count += 1 - # the from import - mod, token_type, tok = self._parsedotname(self.current) - if str(mod) == 'import' and relative_count: - self.gen.push_back(self._current_full) - tok = 'import' - mod = None - if not mod and not relative_count or tok != "import": - debug.warning("from: syntax error@%s" % self.start_pos[0]) - defunct = True - if tok != 'import': - self.gen.push_back(self._current_full) - names = self._parseimportlist() - for name, alias, defunct2 in names: - star = name is not None and name.names[0] == '*' - if star: - name = None - i = Import(first_pos, self.end_pos, name, alias, mod, - star, relative_count, defunct=defunct or defunct2) - self._check_user_stmt(i) - self.scope.add_import(i) - self.freshscope = False - #loops - elif tok == 'for': - set_stmt, tok = self._parse_statement(added_breaks=['in']) - if tok == 'in': - statement, tok = self._parse_statement() - if tok == ':': - s = [] if statement is None else [statement] - f = ForFlow(s, first_pos, set_stmt) - self.scope = self.scope.add_statement(f) - else: - debug.warning('syntax err, for flow started @%s', - self.start_pos[0]) - if statement is not None: - statement.parent = self.scope - if set_stmt is not None: - set_stmt.parent = self.scope - else: - debug.warning('syntax err, for flow incomplete @%s', - self.start_pos[0]) - if set_stmt is not None: - set_stmt.parent = self.scope - - elif tok in ['if', 'while', 'try', 'with'] + extended_flow: - added_breaks = [] - command = tok - if command in ['except', 'with']: - added_breaks.append(',') - # multiple statements because of with - inits = [] - first = True - while first or command == 'with' \ - and tok not in [':', '\n']: - statement, tok = \ - self._parse_statement(added_breaks=added_breaks) - if command == 'except' and tok in added_breaks: - # the except statement defines a var - # this is only true for python 2 - n, token_type, tok = self._parsedotname() - if n: - statement.set_vars.append(n) - statement.code += ',' + n.get_code() - if statement: - inits.append(statement) - first = False - - if tok == ':': - f = Flow(command, inits, first_pos) - if command in extended_flow: - # the last statement has to be another part of - # the flow statement, because a dedent 
releases the - # main scope, so just take the last statement. - try: - s = self.scope.statements[-1].set_next(f) - except (AttributeError, IndexError): - # If set_next doesn't exist, just add it. - s = self.scope.add_statement(f) - else: - s = self.scope.add_statement(f) - self.scope = s - else: - for i in inits: - i.parent = self.scope - debug.warning('syntax err, flow started @%s', - self.start_pos[0]) - # globals - elif tok == 'global': - stmt, tok = self._parse_statement(self.current) - if stmt: - self.scope.add_statement(stmt) - for name in stmt.used_vars: - # add the global to the top, because there it is - # important. - self.module.add_global(name) - # decorator - elif tok == '@': - stmt, tok = self._parse_statement() - decorators.append(stmt) - elif tok == 'pass': - continue - elif tok == 'assert': - stmt, tok = self._parse_statement() - stmt.parent = self.scope - self.scope.asserts.append(stmt) - # default - elif token_type in [tokenize.NAME, tokenize.STRING, - tokenize.NUMBER] \ - or tok in statement_toks: - # this is the main part - a name can be a function or a - # normal var, which can follow anything. but this is done - # by the statement parser. - stmt, tok = self._parse_statement(self.current) - if stmt: - self.scope.add_statement(stmt) - self.freshscope = False - else: - if token_type not in [tokenize.COMMENT, tokenize.INDENT, - tokenize.NEWLINE, tokenize.NL, - tokenize.ENDMARKER]: - debug.warning('token not classified', tok, token_type, - self.start_pos[0]) - - del self.buf - return self.module diff --git a/common/.local/lib/python2.7/site-packages/jedi/settings.py b/common/.local/lib/python2.7/site-packages/jedi/settings.py deleted file mode 100644 index 23f366d..0000000 --- a/common/.local/lib/python2.7/site-packages/jedi/settings.py +++ /dev/null @@ -1,87 +0,0 @@ -# ---------------- -# completion output settings -# ---------------- - -# The completion is by default case insensitive. -case_insensitive_completion = True - -# Adds a dot after a module, because a module that is not accessed this way is -# definitely not the normal case. However, in VIM this doesn't work, that's why -# it isn't used at the moment. -add_dot_after_module = False - -# Adds an opening bracket after a function, because that's normal behaviour. -# Removed it again, because in VIM that is not very practical. -add_bracket_after_function = False - - -# ---------------- -# dynamic stuff -# ---------------- - -# check for `append`, etc. on array instances like list() -dynamic_arrays_instances = True -# check for `append`, etc. on arrays: [], {}, () -dynamic_array_additions = True - -# A dynamic param completion, finds the callees of the function, which define -# the params of a function. -dynamic_params = True -# Do the same for other modules. -dynamic_params_for_other_modules = True - -# Additional modules in which Jedi checks if statements are to be found. This -# is practical for IDE's, that want to administrate their modules themselves. -additional_dynamic_modules = [] - -# ---------------- -# recursions -# ---------------- - -# Recursion settings are important if you don't want extremly recursive python -# code to go absolutely crazy. First of there is a global limit -# `max_executions`. This limit is important, to set a maximum amount of time, -# the completion may use. -# -# The `max_until_execution_unique` limit is probably the most important one, -# because if that limit is passed, functions can only be one time executed. 
So -# new functions will be executed, complex recursions with the same functions -# again and again, are ignored. -# -# `max_function_recursion_level` is more about whether the recursions are -# stopped in deepth or in width. The ratio beetween this and -# `max_until_execution_unique` is important here. It stops a recursion (after -# the number of function calls in the recursion), if it was already used -# earlier. -# -# The values are based on my experimental tries, used on the jedi library. But -# I don't think there's any other Python library, that uses recursion in a -# similar (extreme) way. This makes the completion definitely worse in some -# cases. But a completion should also be fast. - -max_function_recursion_level = 5 -max_until_execution_unique = 50 -max_executions_without_builtins = 200 -max_executions = 250 - -# Because get_in_function_call is normally used on every single key hit, it has -# to be faster than a normal completion. This is the factor that is used to -# scale `max_executions` and `max_until_execution_unique`: -scale_get_in_function_call = 0.1 - -# ---------------- -# various -# ---------------- - -# Size of the current code part, which is used to speed up parsing. -part_line_length = 20 - -# ---------------- -# star import caching -# ---------------- - -# In huge packages like numpy, checking all star imports on every completion -# might be slow, therefore we do a star import caching, that lasts a certain -# time span (in seconds). - -star_import_cache_validity = 60.0 diff --git a/common/.local/lib/python2.7/site-packages/powerline/__init__.py b/common/.local/lib/python2.7/site-packages/powerline/__init__.py deleted file mode 100644 index fdbc33f..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/__init__.py +++ /dev/null @@ -1,409 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from __future__ import absolute_import -import os -import sys -import logging - -from powerline.colorscheme import Colorscheme -from powerline.lib.config import ConfigLoader - -from threading import Lock, Event - - -DEFAULT_SYSTEM_CONFIG_DIR = None - - -def find_config_file(search_paths, config_file): - config_file += '.json' - for path in search_paths: - config_file_path = os.path.join(path, config_file) - if os.path.isfile(config_file_path): - return config_file_path - raise IOError('Config file not found in search path: {0}'.format(config_file)) - - -class PowerlineLogger(object): - def __init__(self, use_daemon_threads, logger, ext): - self.logger = logger - self.ext = ext - self.use_daemon_threads = use_daemon_threads - self.prefix = '' - self.last_msgs = {} - - def _log(self, attr, msg, *args, **kwargs): - prefix = kwargs.get('prefix') or self.prefix - prefix = self.ext + ((':' + prefix) if prefix else '') - if args or kwargs: - msg = msg.format(*args, **kwargs) - msg = prefix + ':' + msg - key = attr + ':' + prefix - if msg != self.last_msgs.get(key): - getattr(self.logger, attr)(msg) - self.last_msgs[key] = msg - - def critical(self, msg, *args, **kwargs): - self._log('critical', msg, *args, **kwargs) - - def exception(self, msg, *args, **kwargs): - self._log('exception', msg, *args, **kwargs) - - def info(self, msg, *args, **kwargs): - self._log('info', msg, *args, **kwargs) - - def error(self, msg, *args, **kwargs): - self._log('error', msg, *args, **kwargs) - - def warn(self, msg, *args, **kwargs): - self._log('warning', msg, *args, **kwargs) - - def debug(self, msg, *args, **kwargs): - self._log('debug', msg, *args, **kwargs) - - -class Powerline(object): - '''Main 
powerline class, entry point for all powerline uses. Sets - powerline up and loads the configuration. - - :param str ext: - extension used. Determines where configuration files will - be searched and what renderer module will be used. Affected: the ``ext`` - dictionary used from :file:`powerline/config.json`, the location of themes and - colorschemes, and the renderer module (``powerline.renderers.{ext}``). - :param str renderer_module: - Overrides renderer module (defaults to ``ext``). Should be the name of - the package imported like this: ``powerline.renderers.{renderer_module}``. - If this parameter contains a dot, ``powerline.renderers.`` is not - prepended. There is also a special case for renderers defined in - top-level modules: ``foo.`` (note: dot at the end) tries to get renderer - from module ``foo`` (because ``foo`` (without dot) tries to get renderer - from module ``powerline.renderers.foo``). - :param bool run_once: - Determines whether the .renderer.render() method will be run only once - during the Python session. - :param Logger logger: - If present, no new logger will be created and this logger will be used. - :param bool use_daemon_threads: - Determines whether daemon threads will be used. - :param Event shutdown_event: - Use this Event as the shutdown event. - :param ConfigLoader config_loader: - Object that manages (re)loading of the configuration. - ''' - - def __init__(self, - ext, - renderer_module=None, - run_once=False, - logger=None, - use_daemon_threads=True, - shutdown_event=None, - config_loader=None): - self.ext = ext - self.renderer_module = renderer_module or ext - self.run_once = run_once - self.logger = logger - self.use_daemon_threads = use_daemon_threads - - if '.' not in self.renderer_module: - self.renderer_module = 'powerline.renderers.' + self.renderer_module - elif self.renderer_module[-1] == '.': - self.renderer_module = self.renderer_module[:-1] - - config_paths = self.get_config_paths() - self.find_config_file = lambda cfg_path: find_config_file(config_paths, cfg_path) - - self.cr_kwargs_lock = Lock() - self.create_renderer_kwargs = { - 'load_main': True, - 'load_colors': True, - 'load_colorscheme': True, - 'load_theme': True, - } - self.shutdown_event = shutdown_event or Event() - self.config_loader = config_loader or ConfigLoader(shutdown_event=self.shutdown_event) - self.run_loader_update = False - - self.renderer_options = {} - - self.prev_common_config = None - self.prev_ext_config = None - self.pl = None - - def create_renderer(self, load_main=False, load_colors=False, load_colorscheme=False, load_theme=False): - '''(Re)create the renderer object. Can be used after the Powerline object has - been successfully initialized. If any of the below parameters except - ``load_main`` is True, the renderer object will be recreated. - - :param bool load_main: - Determines whether the main configuration file (:file:`config.json`) - should be loaded. If the relevant configuration has changed, this implies - ``load_colorscheme`` and ``load_theme`` and recreation of the renderer - object. Won’t trigger recreation if only unrelated configuration - changed. - :param bool load_colors: - Determines whether colors configuration from :file:`colors.json` - should be (re)loaded. - :param bool load_colorscheme: - Determines whether colorscheme configuration should be (re)loaded. - :param bool load_theme: - Determines whether theme configuration should be reloaded.
- ''' - common_config_differs = False - ext_config_differs = False - if load_main: - self._purge_configs('main') - config = self.load_main_config() - self.common_config = config['common'] - if self.common_config != self.prev_common_config: - common_config_differs = True - self.prev_common_config = self.common_config - self.common_config['paths'] = [os.path.expanduser(path) for path in self.common_config.get('paths', [])] - self.import_paths = self.common_config['paths'] - - if not self.logger: - log_format = self.common_config.get('log_format', '%(asctime)s:%(levelname)s:%(message)s') - formatter = logging.Formatter(log_format) - - level = getattr(logging, self.common_config.get('log_level', 'WARNING')) - handler = self.get_log_handler() - handler.setLevel(level) - handler.setFormatter(formatter) - - self.logger = logging.getLogger('powerline') - self.logger.setLevel(level) - self.logger.addHandler(handler) - - if not self.pl: - self.pl = PowerlineLogger(self.use_daemon_threads, self.logger, self.ext) - if not self.config_loader.pl: - self.config_loader.pl = self.pl - - self.renderer_options.update( - pl=self.pl, - term_truecolor=self.common_config.get('term_truecolor', False), - ambiwidth=self.common_config.get('ambiwidth', 1), - tmux_escape=self.common_config.get('additional_escapes') == 'tmux', - screen_escape=self.common_config.get('additional_escapes') == 'screen', - theme_kwargs={ - 'ext': self.ext, - 'common_config': self.common_config, - 'run_once': self.run_once, - 'shutdown_event': self.shutdown_event, - }, - ) - - if not self.run_once and self.common_config.get('reload_config', True): - interval = self.common_config.get('interval', None) - self.config_loader.set_interval(interval) - self.run_loader_update = (interval is None) - if interval is not None and not self.config_loader.is_alive(): - self.config_loader.start() - - self.ext_config = config['ext'][self.ext] - if self.ext_config != self.prev_ext_config: - ext_config_differs = True - if not self.prev_ext_config or self.ext_config.get('local_themes') != self.prev_ext_config.get('local_themes'): - self.renderer_options['local_themes'] = self.get_local_themes(self.ext_config.get('local_themes')) - load_colorscheme = (load_colorscheme - or not self.prev_ext_config - or self.prev_ext_config['colorscheme'] != self.ext_config['colorscheme']) - load_theme = (load_theme - or not self.prev_ext_config - or self.prev_ext_config['theme'] != self.ext_config['theme']) - self.prev_ext_config = self.ext_config - - create_renderer = load_colors or load_colorscheme or load_theme or common_config_differs or ext_config_differs - - if load_colors: - self._purge_configs('colors') - self.colors_config = self.load_colors_config() - - if load_colorscheme or load_colors: - self._purge_configs('colorscheme') - if load_colorscheme: - self.colorscheme_config = self.load_colorscheme_config(self.ext_config['colorscheme']) - self.renderer_options['colorscheme'] = Colorscheme(self.colorscheme_config, self.colors_config) - - if load_theme: - self._purge_configs('theme') - self.renderer_options['theme_config'] = self.load_theme_config(self.ext_config.get('theme', 'default')) - - if create_renderer: - try: - Renderer = __import__(self.renderer_module, fromlist=['renderer']).renderer - except Exception as e: - self.pl.exception('Failed to import renderer module: {0}', str(e)) - sys.exit(1) - - # Renderer updates configuration file via segments’ .startup thus it - # should be locked to prevent state when configuration was updated, - # but .render still uses old 
renderer. - try: - renderer = Renderer(**self.renderer_options) - except Exception as e: - self.pl.exception('Failed to construct renderer object: {0}', str(e)) - if not hasattr(self, 'renderer'): - raise - else: - self.renderer = renderer - - def get_log_handler(self): - '''Get log handler. - - :param dict common_config: - Common configuration. - - :return: logging.Handler subclass. - ''' - log_file = self.common_config.get('log_file', None) - if log_file: - log_file = os.path.expanduser(log_file) - log_dir = os.path.dirname(log_file) - if not os.path.isdir(log_dir): - os.mkdir(log_dir) - return logging.FileHandler(log_file) - else: - return logging.StreamHandler() - - @staticmethod - def get_config_paths(): - '''Get configuration paths. - - :return: list of paths - ''' - config_home = os.environ.get('XDG_CONFIG_HOME', os.path.join(os.path.expanduser('~'), '.config')) - config_path = os.path.join(config_home, 'powerline') - config_paths = [config_path] - config_dirs = os.environ.get('XDG_CONFIG_DIRS', DEFAULT_SYSTEM_CONFIG_DIR) - if config_dirs is not None: - config_paths.extend([os.path.join(d, 'powerline') for d in config_dirs.split(':')]) - plugin_path = os.path.join(os.path.realpath(os.path.dirname(__file__)), 'config_files') - config_paths.append(plugin_path) - return config_paths - - def _load_config(self, cfg_path, type): - '''Load configuration and setup watches.''' - function = getattr(self, 'on_' + type + '_change') - try: - path = self.find_config_file(cfg_path) - except IOError: - self.config_loader.register_missing(self.find_config_file, function, cfg_path) - raise - self.config_loader.register(function, path) - return self.config_loader.load(path) - - def _purge_configs(self, type): - function = getattr(self, 'on_' + type + '_change') - self.config_loader.unregister_functions(set((function,))) - self.config_loader.unregister_missing(set(((self.find_config_file, function),))) - - def load_theme_config(self, name): - '''Get theme configuration. - - :param str name: - Name of the theme to load. - - :return: dictionary with :ref:`theme configuration ` - ''' - return self._load_config(os.path.join('themes', self.ext, name), 'theme') - - def load_main_config(self): - '''Get top-level configuration. - - :return: dictionary with :ref:`top-level configuration `. - ''' - return self._load_config('config', 'main') - - def load_colorscheme_config(self, name): - '''Get colorscheme. - - :param str name: - Name of the colorscheme to load. - - :return: dictionary with :ref:`colorscheme configuration `. - ''' - return self._load_config(os.path.join('colorschemes', self.ext, name), 'colorscheme') - - def load_colors_config(self): - '''Get colorscheme. - - :return: dictionary with :ref:`colors configuration `. - ''' - return self._load_config('colors', 'colors') - - @staticmethod - def get_local_themes(local_themes): - '''Get local themes. No-op here, to be overridden in subclasses if - required. - - :param dict local_themes: - Usually accepts ``{matcher_name : theme_name}``. May also receive - None in case there is no local_themes configuration. - - :return: - anything accepted by ``self.renderer.get_theme`` and processable by - ``self.renderer.add_local_theme``. Renderer module is determined by - ``__init__`` arguments, refer to its documentation. 
- ''' - return None - - def update_renderer(self): - '''Updates/creates a renderer if needed.''' - if self.run_loader_update: - self.config_loader.update() - create_renderer_kwargs = None - with self.cr_kwargs_lock: - if self.create_renderer_kwargs: - create_renderer_kwargs = self.create_renderer_kwargs.copy() - if create_renderer_kwargs: - try: - self.create_renderer(**create_renderer_kwargs) - except Exception as e: - self.pl.exception('Failed to create renderer: {0}', str(e)) - finally: - self.create_renderer_kwargs.clear() - - def render(self, *args, **kwargs): - '''Update/create renderer if needed and pass all arguments further to - ``self.renderer.render()``. - ''' - self.update_renderer() - return self.renderer.render(*args, **kwargs) - - def shutdown(self): - '''Shut down all background threads. Must be run only prior to exiting - current application. - ''' - self.shutdown_event.set() - self.renderer.shutdown() - functions = ( - self.on_main_change, - self.on_colors_change, - self.on_colorscheme_change, - self.on_theme_change, - ) - self.config_loader.unregister_functions(set(functions)) - self.config_loader.unregister_missing(set(((find_config_file, function) for function in functions))) - - def on_main_change(self, path): - with self.cr_kwargs_lock: - self.create_renderer_kwargs['load_main'] = True - - def on_colors_change(self, path): - with self.cr_kwargs_lock: - self.create_renderer_kwargs['load_colors'] = True - - def on_colorscheme_change(self, path): - with self.cr_kwargs_lock: - self.create_renderer_kwargs['load_colorscheme'] = True - - def on_theme_change(self, path): - with self.cr_kwargs_lock: - self.create_renderer_kwargs['load_theme'] = True - - def __enter__(self): - return self - - def __exit__(self, *args): - self.shutdown() diff --git a/common/.local/lib/python2.7/site-packages/powerline/bindings/__init__.py b/common/.local/lib/python2.7/site-packages/powerline/bindings/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/common/.local/lib/python2.7/site-packages/powerline/bindings/awesome/powerline-awesome.py b/common/.local/lib/python2.7/site-packages/powerline/bindings/awesome/powerline-awesome.py deleted file mode 100755 index 3e4125d..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/bindings/awesome/powerline-awesome.py +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env python -# vim:fileencoding=utf-8:noet - -from powerline import Powerline -import sys -from time import sleep -from powerline.lib.monotonic import monotonic -from subprocess import Popen, PIPE - -powerline = Powerline('wm', renderer_module='pango_markup') -powerline.update_renderer() - -try: - interval = float(sys.argv[1]) -except IndexError: - interval = 2 - - -def read_to_log(pl, client): - for line in client.stdout: - if line: - pl.info(line, prefix='awesome-client') - for line in client.stderr: - if line: - pl.error(line, prefix='awesome-client') - if client.wait(): - pl.error('Client exited with {0}', client.returncode, prefix='awesome') - - -while True: - start_time = monotonic() - s = powerline.render(side='right') - request = "powerline_widget:set_markup('" + s.replace('\\', '\\\\').replace("'", "\\'") + "')\n" - client = Popen(['awesome-client'], shell=False, stdout=PIPE, stderr=PIPE, stdin=PIPE) - client.stdin.write(request.encode('utf-8')) - client.stdin.close() - read_to_log(powerline.pl, client) - sleep(max(interval - (monotonic() - start_time), 0.1)) diff --git a/common/.local/lib/python2.7/site-packages/powerline/bindings/awesome/powerline.lua 
b/common/.local/lib/python2.7/site-packages/powerline/bindings/awesome/powerline.lua deleted file mode 100644 index 659fade..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/bindings/awesome/powerline.lua +++ /dev/null @@ -1,11 +0,0 @@ -local wibox = require('wibox') -local awful = require('awful') - -powerline_widget = wibox.widget.textbox() -powerline_widget:set_align('right') - -function powerline(mode, widget) end - -bindings_path = string.gsub(debug.getinfo(1).source:match('@(.*)$'), '/[^/]+$', '') -powerline_cmd = bindings_path .. '/powerline-awesome.py' -awful.util.spawn_with_shell('ps -C powerline-awesome.py || ' .. powerline_cmd) diff --git a/common/.local/lib/python2.7/site-packages/powerline/bindings/bash/powerline.sh b/common/.local/lib/python2.7/site-packages/powerline/bindings/bash/powerline.sh deleted file mode 100644 index 50eb487..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/bindings/bash/powerline.sh +++ /dev/null @@ -1,28 +0,0 @@ -_powerline_tmux_setenv() { - if [[ -n "$TMUX" ]]; then - tmux setenv TMUX_"$1"_$(tmux display -p "#D" | tr -d %) "$2" - fi -} - -_powerline_tmux_set_pwd() { - _powerline_tmux_setenv PWD "$PWD" -} - -_powerline_tmux_set_columns() { - _powerline_tmux_setenv COLUMNS "$COLUMNS" -} - -_powerline_prompt() { - local last_exit_code=$? - [[ -z "$POWERLINE_OLD_PROMPT_COMMAND" ]] || - eval $POWERLINE_OLD_PROMPT_COMMAND - PS1="$(powerline shell left -r bash_prompt --last_exit_code=$last_exit_code)" - _powerline_tmux_set_pwd -} - -trap "_powerline_tmux_set_columns" SIGWINCH -_powerline_tmux_set_columns - -[[ "$PROMPT_COMMAND" == "_powerline_prompt" ]] || - POWERLINE_OLD_PROMPT_COMMAND="$PROMPT_COMMAND" -export PROMPT_COMMAND="_powerline_prompt" diff --git a/common/.local/lib/python2.7/site-packages/powerline/bindings/ipython/__init__.py b/common/.local/lib/python2.7/site-packages/powerline/bindings/ipython/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/common/.local/lib/python2.7/site-packages/powerline/bindings/ipython/post_0_11.py b/common/.local/lib/python2.7/site-packages/powerline/bindings/ipython/post_0_11.py deleted file mode 100644 index f91a5ae..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/bindings/ipython/post_0_11.py +++ /dev/null @@ -1,61 +0,0 @@ -# vim:fileencoding=utf-8:noet -from powerline.ipython import IpythonPowerline - -from IPython.core.prompts import PromptManager -from IPython.core.hooks import TryNext - - -class IpythonInfo(object): - def __init__(self, shell): - self._shell = shell - - @property - def prompt_count(self): - return self._shell.execution_count - - -class PowerlinePromptManager(PromptManager): - powerline = None - - def __init__(self, powerline, shell): - self.powerline = powerline - self.powerline_segment_info = IpythonInfo(shell) - self.shell = shell - - def render(self, name, color=True, *args, **kwargs): - width = None if name == 'in' else self.width - res, res_nocolor = self.powerline.render(output_raw=True, width=width, matcher_info=name, segment_info=self.powerline_segment_info) - self.txtwidth = len(res_nocolor) - self.width = self.txtwidth - return res if color else res_nocolor - - -class ConfigurableIpythonPowerline(IpythonPowerline): - def __init__(self, ip): - config = ip.config.Powerline - self.config_overrides = config.get('config_overrides') - self.theme_overrides = config.get('theme_overrides', {}) - self.path = config.get('path') - super(ConfigurableIpythonPowerline, self).__init__() - - -old_prompt_manager = None - - 
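The post_0_11 binding deleted here is normally enabled through IPython's configuration file; load_ipython_extension below is the hook IPython calls. A minimal sketch of an ipython_config.py entry, assuming a default profile layout; only the key names (config_overrides, theme_overrides, path) come from ConfigurableIpythonPowerline above, and the values shown are illustrative assumptions:
# ipython_config.py (sketch; values are illustrative, not taken from this repo)
c = get_config()
# Import this module and call its load_ipython_extension(ip) on startup.
c.InteractiveShellApp.extensions = ['powerline.bindings.ipython.post_0_11']
# Optional overrides read by ConfigurableIpythonPowerline:
c.Powerline.config_overrides = {'common': {'term_truecolor': False}}  # overrides for the main config.json
c.Powerline.theme_overrides = {}  # optional per-theme overrides
c.Powerline.path = '/path/to/custom/powerline/config'  # hypothetical extra configuration path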
-def load_ipython_extension(ip): - global old_prompt_manager - - old_prompt_manager = ip.prompt_manager - powerline = ConfigurableIpythonPowerline(ip) - - ip.prompt_manager = PowerlinePromptManager(powerline=powerline, shell=ip.prompt_manager.shell) - - def shutdown_hook(): - powerline.shutdown() - raise TryNext() - - ip.hooks.shutdown_hook.add(shutdown_hook) - - -def unload_ipython_extension(ip): - ip.prompt_manager = old_prompt_manager diff --git a/common/.local/lib/python2.7/site-packages/powerline/bindings/ipython/pre_0_11.py b/common/.local/lib/python2.7/site-packages/powerline/bindings/ipython/pre_0_11.py deleted file mode 100644 index 628c39b..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/bindings/ipython/pre_0_11.py +++ /dev/null @@ -1,135 +0,0 @@ -# vim:fileencoding=utf-8:noet -from powerline.ipython import IpythonPowerline -from IPython.Prompts import BasePrompt -from IPython.ipapi import get as get_ipython -from IPython.ipapi import TryNext - -import re - - -def string(s): - if type(s) is not str: - return s.encode('utf-8') - else: - return s - - -# HACK: ipython tries to only leave us with plain ASCII -class RewriteResult(object): - def __init__(self, prompt): - self.prompt = string(prompt) - - def __str__(self): - return self.prompt - - def __add__(self, s): - if type(s) is not str: - try: - s = s.encode('utf-8') - except AttributeError: - raise NotImplementedError - return RewriteResult(self.prompt + s) - - -class IpythonInfo(object): - def __init__(self, cache): - self._cache = cache - - @property - def prompt_count(self): - return self._cache.prompt_count - - -class PowerlinePrompt(BasePrompt): - def __init__(self, powerline, powerline_last_in, old_prompt): - self.powerline = powerline - self.powerline_last_in = powerline_last_in - self.powerline_segment_info = IpythonInfo(old_prompt.cache) - self.cache = old_prompt.cache - if hasattr(old_prompt, 'sep'): - self.sep = old_prompt.sep - self.pad_left = False - - def __str__(self): - self.set_p_str() - return string(self.p_str) - - def set_p_str(self, width=None): - self.p_str, self.p_str_nocolor = ( - self.powerline.render(output_raw=True, - segment_info=self.powerline_segment_info, - matcher_info=self.powerline_prompt_type, - width=width) - ) - - @staticmethod - def set_colors(): - pass - - -class PowerlinePrompt1(PowerlinePrompt): - powerline_prompt_type = 'in' - rspace = re.compile(r'(\s*)$') - - def __str__(self): - self.cache.prompt_count += 1 - self.set_p_str() - self.cache.last_prompt = self.p_str_nocolor.split('\n')[-1] - return string(self.p_str) - - def set_p_str(self): - super(PowerlinePrompt1, self).set_p_str() - self.nrspaces = len(self.rspace.search(self.p_str_nocolor).group()) - self.prompt_text_len = len(self.p_str_nocolor) - self.nrspaces - self.powerline_last_in['nrspaces'] = self.nrspaces - self.powerline_last_in['prompt_text_len'] = self.prompt_text_len - - def auto_rewrite(self): - return RewriteResult(self.powerline.render(matcher_info='rewrite', width=self.prompt_text_len, segment_info=self.powerline_segment_info) - + (' ' * self.nrspaces)) - - -class PowerlinePromptOut(PowerlinePrompt): - powerline_prompt_type = 'out' - - def set_p_str(self): - super(PowerlinePromptOut, self).set_p_str(width=self.powerline_last_in['prompt_text_len']) - spaces = ' ' * self.powerline_last_in['nrspaces'] - self.p_str += spaces - self.p_str_nocolor += spaces - - -class PowerlinePrompt2(PowerlinePromptOut): - powerline_prompt_type = 'in2' - - -class ConfigurableIpythonPowerline(IpythonPowerline): - def 
__init__(self, config_overrides=None, theme_overrides={}, path=None): - self.config_overrides = config_overrides - self.theme_overrides = theme_overrides - self.path = path - super(ConfigurableIpythonPowerline, self).__init__() - - -def setup(**kwargs): - ip = get_ipython() - - powerline = ConfigurableIpythonPowerline(**kwargs) - - def late_startup_hook(): - last_in = {'nrspaces': 0, 'prompt_text_len': None} - for attr, prompt_class in ( - ('prompt1', PowerlinePrompt1), - ('prompt2', PowerlinePrompt2), - ('prompt_out', PowerlinePromptOut) - ): - old_prompt = getattr(ip.IP.outputcache, attr) - setattr(ip.IP.outputcache, attr, prompt_class(powerline, last_in, old_prompt)) - raise TryNext() - - def shutdown_hook(): - powerline.shutdown() - raise TryNext() - - ip.IP.hooks.late_startup_hook.add(late_startup_hook) - ip.IP.hooks.shutdown_hook.add(shutdown_hook) diff --git a/common/.local/lib/python2.7/site-packages/powerline/bindings/qtile/__init__.py b/common/.local/lib/python2.7/site-packages/powerline/bindings/qtile/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/common/.local/lib/python2.7/site-packages/powerline/bindings/qtile/widget.py b/common/.local/lib/python2.7/site-packages/powerline/bindings/qtile/widget.py deleted file mode 100644 index 6c1e660..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/bindings/qtile/widget.py +++ /dev/null @@ -1,36 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from libqtile import bar -from libqtile.widget import base - -from powerline import Powerline as PowerlineCore - - -class Powerline(base._TextBox): - def __init__(self, timeout=2, text=" ", width=bar.CALCULATED, **config): - base._TextBox.__init__(self, text, width, **config) - self.timeout_add(timeout, self.update) - self.powerline = PowerlineCore(ext='wm', renderer_module='pango_markup') - - def update(self): - if not self.configured: - return True - self.text = self.powerline.render(side='right') - self.bar.draw() - return True - - def cmd_update(self, text): - self.update(text) - - def cmd_get(self): - return self.text - - def _configure(self, qtile, bar): - base._TextBox._configure(self, qtile, bar) - self.layout = self.drawer.textlayout( - self.text, - self.foreground, - self.font, - self.fontsize, - self.fontshadow, - markup=True) diff --git a/common/.local/lib/python2.7/site-packages/powerline/bindings/tmux/powerline.conf b/common/.local/lib/python2.7/site-packages/powerline/bindings/tmux/powerline.conf deleted file mode 100644 index 5d43cd3..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/bindings/tmux/powerline.conf +++ /dev/null @@ -1,11 +0,0 @@ -set -g status on -set -g status-utf8 on -set -g status-interval 2 -set -g status-fg colour231 -set -g status-bg colour234 -set -g status-left-length 20 -set -g status-left '#[fg=colour16,bg=colour254,bold] #S #[fg=colour254,bg=colour234,nobold]#(powerline tmux left)' -set -g status-right '#(powerline tmux right)' -set -g status-right-length 150 -set -g window-status-format "#[fg=colour244,bg=colour234]#I #[fg=colour240] #[fg=colour249]#W " -set -g window-status-current-format "#[fg=colour234,bg=colour31]#[fg=colour117,bg=colour31] #I  #[fg=colour231,bold]#W #[fg=colour31,bg=colour234,nobold]" diff --git a/common/.local/lib/python2.7/site-packages/powerline/bindings/vim/__init__.py b/common/.local/lib/python2.7/site-packages/powerline/bindings/vim/__init__.py deleted file mode 100644 index 3d7f0c0..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/bindings/vim/__init__.py 
+++ /dev/null @@ -1,64 +0,0 @@ -# vim:fileencoding=utf-8:noet - -import sys - -try: - import vim -except ImportError: - vim = {} - -try: - _vim_globals = vim.bindeval('g:') - - def vim_set_global_var(var, val): - '''Set a global var in vim using bindeval().''' - _vim_globals[var] = val - - def vim_get_func(f, rettype=None): - '''Return a vim function binding.''' - try: - func = vim.bindeval('function("' + f + '")') - if sys.version_info >= (3,) and rettype is str: - return (lambda *args, **kwargs: func(*args, **kwargs).decode('utf-8', errors='replace')) - return func - except vim.error: - return None -except AttributeError: - import json - - def vim_set_global_var(var, val): # NOQA - '''Set a global var in vim using vim.command(). - - This is a fallback function for older vim versions. - ''' - vim.command('let g:{0}={1}'.format(var, json.dumps(val))) - - class VimFunc(object): - '''Evaluate a vim function using vim.eval(). - - This is a fallback class for older vim versions. - ''' - __slots__ = ('f', 'rettype') - - def __init__(self, f, rettype=None): - self.f = f - self.rettype = rettype - - def __call__(self, *args): - r = vim.eval(self.f + '(' + json.dumps(args)[1:-1] + ')') - if self.rettype: - return self.rettype(r) - return r - - vim_get_func = VimFunc - -if sys.version_info < (3,) or not hasattr(vim, 'bindeval'): - getbufvar = vim_get_func('getbufvar') -else: - _getbufvar = vim_get_func('getbufvar') - - def getbufvar(*args): - r = _getbufvar(*args) - if type(r) is bytes: - return r.decode('utf-8') - return r diff --git a/common/.local/lib/python2.7/site-packages/powerline/bindings/vim/plugin/powerline.vim b/common/.local/lib/python2.7/site-packages/powerline/bindings/vim/plugin/powerline.vim deleted file mode 100644 index 6ed0f03..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/bindings/vim/plugin/powerline.vim +++ /dev/null @@ -1,95 +0,0 @@ -if exists('g:powerline_loaded') - finish -endif -let g:powerline_loaded = 1 - -function! s:CriticalError(message) - echohl ErrorMsg - echomsg a:message - echohl None -endfunction - -if ! has('python') && ! has('python3') - call s:CriticalError('You need vim compiled with Python 2.6+ or 3.2+ support - \ for Powerline to work. Please consult the documentation for more details.') - finish -endif - -let s:powerline_pycmd = substitute(get(g:, 'powerline_pycmd', has('python') ? 'py' : 'py3'), - \'\v^(py)%[thon](3?)$', '\1\2', '') -let s:powerline_pyeval = get(g:, 'powerline_pyeval', s:powerline_pycmd.'eval') - -let s:import_cmd = 'from powerline.vim import VimPowerline' -try - exec s:powerline_pycmd "try:\n" - \ ." ".s:import_cmd."\n" - \ ."except ImportError:\n" - \ ." import sys, vim\n" - \ ." sys.path.append(vim.eval('expand(\":h:h:h:h:h\")'))\n" - \ ." ".s:import_cmd - let s:launched = 1 -finally - if !exists('s:launched') - call s:CriticalError('An error occurred while importing the Powerline package. - \ This could be caused by an invalid sys.path setting, or by an incompatible - \ Python version (Powerline requires Python 2.6+ or 3.2+ to work). Please consult - \ the troubleshooting section in the documentation for possible solutions.') - finish - else - unlet s:launched - endif -endtry - -if !get(g:, 'powerline_debugging_pyeval') && exists('*'. s:powerline_pyeval) - let s:pyeval = function(s:powerline_pyeval) -else - exec s:powerline_pycmd 'import json, vim' - exec "function! s:pyeval(e)\n". - \ s:powerline_pycmd." vim.command('return ' + json.dumps(eval(vim.eval('a:e'))))\n". 
- \"endfunction" -endif - -let s:last_window_id = 0 -function! s:GetWinID(winnr) - let r = getwinvar(a:winnr, 'window_id') - if empty(r) - let r = s:last_window_id - let s:last_window_id += 1 - call setwinvar(a:winnr, 'window_id', r) - endif - " Without this condition it triggers unneeded statusline redraw - if getwinvar(a:winnr, '&statusline') isnot# '%!Powerline('.r.')' - call setwinvar(a:winnr, '&statusline', '%!Powerline('.r.')') - endif - return r -endfunction - -function! Powerline(window_id) - let winidx = index(map(range(1, winnr('$')), 's:GetWinID(v:val)'), a:window_id) - let current = w:window_id is# a:window_id - return s:pyeval('powerline.render('. a:window_id .', '. winidx .', '. current .')') -endfunction - -function! PowerlineNew() - call map(range(1, winnr('$')), 's:GetWinID(v:val)') -endfunction - -function! PowerlineRegisterCachePurgerEvent(event) - exec s:powerline_pycmd 'from powerline.segments.vim import launchevent as powerline_launchevent' - augroup Powerline - exec 'autocmd' a:event '*' s:powerline_pycmd.' powerline_launchevent("'.a:event.'")' - augroup END -endfunction - -augroup Powerline - autocmd! ColorScheme * :exec s:powerline_pycmd 'powerline.reset_highlight()' - autocmd! VimEnter * :redrawstatus! - autocmd! VimLeavePre * :exec s:powerline_pycmd 'powerline.shutdown()' -augroup END - -exec s:powerline_pycmd 'powerline = VimPowerline()' -exec s:powerline_pycmd 'del VimPowerline' -" Is immediately changed when PowerlineNew() function is run. Good for global -" value. -set statusline=%!PowerlineNew() -call PowerlineNew() diff --git a/common/.local/lib/python2.7/site-packages/powerline/bindings/zsh/__init__.py b/common/.local/lib/python2.7/site-packages/powerline/bindings/zsh/__init__.py deleted file mode 100644 index 1634794..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/bindings/zsh/__init__.py +++ /dev/null @@ -1,128 +0,0 @@ -# vim:fileencoding=utf-8:noet -import zsh -import atexit -from powerline.shell import ShellPowerline -from powerline.lib import parsedotval - - -used_powerlines = [] - - -def shutdown(): - for powerline in used_powerlines: - powerline.shutdown() - - -def get_var_config(var): - try: - return [parsedotval(i) for i in zsh.getvalue(var).items()] - except: - return None - - -class Args(object): - ext = ['shell'] - renderer_module = 'zsh_prompt' - - @property - def last_exit_code(self): - return zsh.last_exit_code() - - @property - def last_pipe_status(self): - return zsh.pipestatus() - - @property - def config(self): - try: - return get_var_config('POWERLINE_CONFIG') - except IndexError: - return None - - @property - def theme_option(self): - try: - return get_var_config('POWERLINE_THEME_CONFIG') - except IndexError: - return None - - @property - def config_path(self): - try: - return zsh.getvalue('POWERLINE_CONFIG_PATH') - except IndexError: - return None - - -def string(s): - if type(s) is bytes: - return s.decode('utf-8', errors='replace') - else: - return str(s) - - -class Environment(object): - @staticmethod - def __getitem__(key): - try: - return string(zsh.getvalue(key)) - except IndexError as e: - raise KeyError(*e.args) - - @staticmethod - def get(key, default=None): - try: - return string(zsh.getvalue(key)) - except IndexError: - return default - - -environ = Environment() - - -class Prompt(object): - __slots__ = ('powerline', 'side', 'savedpsvar', 'savedps', 'args') - - def __init__(self, powerline, side, savedpsvar=None, savedps=None): - self.powerline = powerline - self.side = side - self.savedpsvar = savedpsvar 
- self.savedps = savedps - self.args = powerline.args - - def __str__(self): - r = self.powerline.render( - width=zsh.columns(), - side=self.side, - segment_info={'args': self.args, 'environ': environ} - ) - if type(r) is not str: - if type(r) is bytes: - return r.decode('utf-8') - else: - return r.encode('utf-8') - return r - - def __del__(self): - if self.savedps: - zsh.setvalue(self.savedpsvar, self.savedps) - used_powerlines.remove(self.powerline) - if self.powerline not in used_powerlines: - self.powerline.shutdown() - - -def set_prompt(powerline, psvar, side): - savedps = zsh.getvalue(psvar) - zpyvar = 'ZPYTHON_POWERLINE_' + psvar - prompt = Prompt(powerline, side, psvar, savedps) - zsh.set_special_string(zpyvar, prompt) - zsh.setvalue(psvar, '${' + zpyvar + '}') - - -def setup(): - powerline = ShellPowerline(Args()) - used_powerlines.append(powerline) - used_powerlines.append(powerline) - set_prompt(powerline, 'PS1', 'left') - set_prompt(powerline, 'RPS1', 'right') - atexit.register(shutdown) diff --git a/common/.local/lib/python2.7/site-packages/powerline/bindings/zsh/powerline.zsh b/common/.local/lib/python2.7/site-packages/powerline/bindings/zsh/powerline.zsh deleted file mode 100644 index a45a665..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/bindings/zsh/powerline.zsh +++ /dev/null @@ -1,39 +0,0 @@ -_powerline_tmux_setenv() { - emulate -L zsh - if [[ -n "$TMUX" ]]; then - tmux setenv TMUX_"$1"_$(tmux display -p "#D" | tr -d %) "$2" - fi -} - -_powerline_tmux_set_pwd() { - _powerline_tmux_setenv PWD "$PWD" -} - -_powerline_tmux_set_columns() { - _powerline_tmux_setenv COLUMNS "$COLUMNS" -} - -_powerline_install_precmd() { - emulate zsh - for f in "${precmd_functions[@]}"; do - if [[ "$f" = "_powerline_precmd" ]]; then - return - fi - done - chpwd_functions+=( _powerline_tmux_set_pwd ) - setopt promptpercent - setopt promptsubst - if zmodload zsh/zpython &>/dev/null ; then - zpython 'from powerline.bindings.zsh import setup as powerline_setup' - zpython 'powerline_setup()' - zpython 'del powerline_setup' - else - PS1='$(powerline shell left -r zsh_prompt --last_exit_code=$? --last_pipe_status="$pipestatus")' - RPS1='$(powerline shell right -r zsh_prompt --last_exit_code=$? --last_pipe_status="$pipestatus")' - fi -} - -trap "_powerline_tmux_set_columns" SIGWINCH -_powerline_tmux_set_columns - -_powerline_install_precmd diff --git a/common/.local/lib/python2.7/site-packages/powerline/colorscheme.py b/common/.local/lib/python2.7/site-packages/powerline/colorscheme.py deleted file mode 100644 index 3b30118..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/colorscheme.py +++ /dev/null @@ -1,143 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from copy import copy - - -DEFAULT_MODE_KEY = None -ATTR_BOLD = 1 -ATTR_ITALIC = 2 -ATTR_UNDERLINE = 4 - - -def get_attr_flag(attributes): - '''Convert an attribute array to a renderer flag.''' - attr_flag = 0 - if 'bold' in attributes: - attr_flag |= ATTR_BOLD - if 'italic' in attributes: - attr_flag |= ATTR_ITALIC - if 'underline' in attributes: - attr_flag |= ATTR_UNDERLINE - return attr_flag - - -def pick_gradient_value(grad_list, gradient_level): - '''Given a list of colors and gradient percent, return a color that should be used. - - Note: gradient level is not checked for being inside [0, 100] interval. 
- ''' - return grad_list[int(round(gradient_level * (len(grad_list) - 1) / 100))] - - -def hl_iter(value): - if type(value) is list: - for v in value: - yield v - else: - yield value - - -class Colorscheme(object): - def __init__(self, colorscheme_config, colors_config): - '''Initialize a colorscheme.''' - self.colors = {} - self.gradients = {} - - self.groups = colorscheme_config['groups'] - self.translations = colorscheme_config.get('mode_translations', {}) - - # Create a dict of color tuples with both a cterm and hex value - for color_name, color in colors_config['colors'].items(): - try: - self.colors[color_name] = (color[0], int(color[1], 16)) - except TypeError: - self.colors[color_name] = (color, cterm_to_hex[color]) - - # Create a dict of gradient names with two lists: for cterm and hex - # values. Two lists in place of one list of pairs were chosen because - # true colors allow more precise gradients. - for gradient_name, gradient in colors_config['gradients'].items(): - if len(gradient) == 2: - self.gradients[gradient_name] = ( - (gradient[0], [int(color, 16) for color in gradient[1]])) - else: - self.gradients[gradient_name] = ( - (gradient[0], [cterm_to_hex[color] for color in gradient[0]])) - - def get_gradient(self, gradient, gradient_level): - if gradient in self.gradients: - return tuple((pick_gradient_value(grad_list, gradient_level) for grad_list in self.gradients[gradient])) - else: - return self.colors[gradient] - - def get_highlighting(self, groups, mode, gradient_level=None): - trans = self.translations.get(mode, {}) - for group in hl_iter(groups): - if 'groups' in trans and group in trans['groups']: - try: - group_props = trans['groups'][group] - except KeyError: - continue - break - - else: - try: - group_props = copy(self.groups[group]) - except KeyError: - continue - - try: - ctrans = trans['colors'] - for key in ('fg', 'bg'): - try: - group_props[key] = ctrans[group_props[key]] - except KeyError: - pass - except KeyError: - pass - - break - else: - raise KeyError('Highlighting groups not found in colorscheme: ' + ', '.join(hl_iter(groups))) - - if gradient_level is None: - pick_color = self.colors.__getitem__ - else: - pick_color = lambda gradient: self.get_gradient(gradient, gradient_level) - - return { - 'fg': pick_color(group_props['fg']), - 'bg': pick_color(group_props['bg']), - 'attr': get_attr_flag(group_props.get('attr', [])), - } - - -# 0 1 2 3 4 5 6 7 8 9 -cterm_to_hex = ( - 0x000000, 0xc00000, 0x008000, 0x804000, 0x0000c0, 0xc000c0, 0x008080, 0xc0c0c0, 0x808080, 0xff6060, # 0 - 0x00ff00, 0xffff00, 0x8080ff, 0xff40ff, 0x00ffff, 0xffffff, 0x000000, 0x00005f, 0x000087, 0x0000af, # 1 - 0x0000d7, 0x0000ff, 0x005f00, 0x005f5f, 0x005f87, 0x005faf, 0x005fd7, 0x005fff, 0x008700, 0x00875f, # 2 - 0x008787, 0x0087af, 0x0087d7, 0x0087ff, 0x00af00, 0x00af5f, 0x00af87, 0x00afaf, 0x00afd7, 0x00afff, # 3 - 0x00d700, 0x00d75f, 0x00d787, 0x00d7af, 0x00d7d7, 0x00d7ff, 0x00ff00, 0x00ff5f, 0x00ff87, 0x00ffaf, # 4 - 0x00ffd7, 0x00ffff, 0x5f0000, 0x5f005f, 0x5f0087, 0x5f00af, 0x5f00d7, 0x5f00ff, 0x5f5f00, 0x5f5f5f, # 5 - 0x5f5f87, 0x5f5faf, 0x5f5fd7, 0x5f5fff, 0x5f8700, 0x5f875f, 0x5f8787, 0x5f87af, 0x5f87d7, 0x5f87ff, # 6 - 0x5faf00, 0x5faf5f, 0x5faf87, 0x5fafaf, 0x5fafd7, 0x5fafff, 0x5fd700, 0x5fd75f, 0x5fd787, 0x5fd7af, # 7 - 0x5fd7d7, 0x5fd7ff, 0x5fff00, 0x5fff5f, 0x5fff87, 0x5fffaf, 0x5fffd7, 0x5fffff, 0x870000, 0x87005f, # 8 - 0x870087, 0x8700af, 0x8700d7, 0x8700ff, 0x875f00, 0x875f5f, 0x875f87, 0x875faf, 0x875fd7, 0x875fff, # 9 - 0x878700, 0x87875f, 0x878787, 0x8787af, 
0x8787d7, 0x8787ff, 0x87af00, 0x87af5f, 0x87af87, 0x87afaf, # 10 - 0x87afd7, 0x87afff, 0x87d700, 0x87d75f, 0x87d787, 0x87d7af, 0x87d7d7, 0x87d7ff, 0x87ff00, 0x87ff5f, # 11 - 0x87ff87, 0x87ffaf, 0x87ffd7, 0x87ffff, 0xaf0000, 0xaf005f, 0xaf0087, 0xaf00af, 0xaf00d7, 0xaf00ff, # 12 - 0xaf5f00, 0xaf5f5f, 0xaf5f87, 0xaf5faf, 0xaf5fd7, 0xaf5fff, 0xaf8700, 0xaf875f, 0xaf8787, 0xaf87af, # 13 - 0xaf87d7, 0xaf87ff, 0xafaf00, 0xafaf5f, 0xafaf87, 0xafafaf, 0xafafd7, 0xafafff, 0xafd700, 0xafd75f, # 14 - 0xafd787, 0xafd7af, 0xafd7d7, 0xafd7ff, 0xafff00, 0xafff5f, 0xafff87, 0xafffaf, 0xafffd7, 0xafffff, # 15 - 0xd70000, 0xd7005f, 0xd70087, 0xd700af, 0xd700d7, 0xd700ff, 0xd75f00, 0xd75f5f, 0xd75f87, 0xd75faf, # 16 - 0xd75fd7, 0xd75fff, 0xd78700, 0xd7875f, 0xd78787, 0xd787af, 0xd787d7, 0xd787ff, 0xd7af00, 0xd7af5f, # 17 - 0xd7af87, 0xd7afaf, 0xd7afd7, 0xd7afff, 0xd7d700, 0xd7d75f, 0xd7d787, 0xd7d7af, 0xd7d7d7, 0xd7d7ff, # 18 - 0xd7ff00, 0xd7ff5f, 0xd7ff87, 0xd7ffaf, 0xd7ffd7, 0xd7ffff, 0xff0000, 0xff005f, 0xff0087, 0xff00af, # 19 - 0xff00d7, 0xff00ff, 0xff5f00, 0xff5f5f, 0xff5f87, 0xff5faf, 0xff5fd7, 0xff5fff, 0xff8700, 0xff875f, # 20 - 0xff8787, 0xff87af, 0xff87d7, 0xff87ff, 0xffaf00, 0xffaf5f, 0xffaf87, 0xffafaf, 0xffafd7, 0xffafff, # 21 - 0xffd700, 0xffd75f, 0xffd787, 0xffd7af, 0xffd7d7, 0xffd7ff, 0xffff00, 0xffff5f, 0xffff87, 0xffffaf, # 22 - 0xffffd7, 0xffffff, 0x080808, 0x121212, 0x1c1c1c, 0x262626, 0x303030, 0x3a3a3a, 0x444444, 0x4e4e4e, # 23 - 0x585858, 0x626262, 0x6c6c6c, 0x767676, 0x808080, 0x8a8a8a, 0x949494, 0x9e9e9e, 0xa8a8a8, 0xb2b2b2, # 24 - 0xbcbcbc, 0xc6c6c6, 0xd0d0d0, 0xdadada, 0xe4e4e4, 0xeeeeee # 25 -) diff --git a/common/.local/lib/python2.7/site-packages/powerline/config_files/colors.json b/common/.local/lib/python2.7/site-packages/powerline/config_files/colors.json deleted file mode 100644 index c64b74c..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/config_files/colors.json +++ /dev/null @@ -1,106 +0,0 @@ -{ - "colors": { - "black": 16, - "white": 231, - - "darkestgreen": 22, - "darkgreen": 28, - "mediumgreen": 70, - "brightgreen": 148, - - "darkestcyan": 23, - "darkcyan": 74, - "mediumcyan": 117, - "brightcyan": 159, - - "darkestblue": 24, - "darkblue": 31, - - "darkestred": 52, - "darkred": 88, - "mediumred": 124, - "brightred": 160, - "brightestred": 196, - - "darkestpurple": 55, - "mediumpurple": 98, - "brightpurple": 189, - - "darkorange": 94, - "mediumorange": 166, - "brightorange": 208, - "brightestorange": 214, - - "brightyellow": 220, - - "gray0": 233, - "gray1": 235, - "gray2": 236, - "gray3": 239, - "gray4": 240, - "gray5": 241, - "gray6": 244, - "gray7": 245, - "gray8": 247, - "gray9": 250, - "gray10": 252, - - "gray61": [14, "93a1a1"], - "gray13": [8, "002b36"], - - "royalblue5": [0, "073642"], - "darkgreencopper": [10, "586e75"], - "lightskyblue4": [11, "657b83"], - "azure4": [12, "839496"], - "lightyellow": [7, "eee8d5"], - "oldlace": [15, "fdf6e3"], - - "green": [2, "719e07"], - "cyan": [6, "2aa198"], - "blue": [4, "268bd2"], - "red": [1, "dc322f"], - "magenta": [5, "d33682"], - "violet": [13, "6c71c4"], - "orange": [9, "cb4b16"], - "yellow": [3, "b58900"], - - "lightyellowgreen": 106, - "gold3": 178, - "orangered": 202, - - "steelblue": 67, - "darkorange3": 166, - "skyblue1": 117, - "khaki1": 228 - }, - "gradients": { - "dark_GREEN_Orange_red": [ - [22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 58, 58, 58, 58, 58, 58, 
58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 94, 94, 94, 94, 94, 94, 94, 52], - ["005f00", "015f00", "025f00", "035f00", "045f00", "055f00", "065f00", "075f00", "085f00", "095f00", "0b5f00", "0c5f00", "0d5f00", "0e5f00", "0f5f00", "105f00", "115f00", "125f00", "135f00", "145f00", "165f00", "175f00", "185f00", "195f00", "1a5f00", "1b5f00", "1c5f00", "1d5f00", "1e5f00", "1f5f00", "215f00", "225f00", "235f00", "245f00", "255f00", "265f00", "275f00", "285f00", "295f00", "2a5f00", "2c5f00", "2d5f00", "2e5f00", "2f5f00", "305f00", "315f00", "325f00", "335f00", "345f00", "355f00", "375f00", "385f00", "395f00", "3a5f00", "3b5f00", "3c5f00", "3d5f00", "3e5f00", "3f5f00", "415f00", "425f00", "435f00", "445f00", "455f00", "465f00", "475f00", "485f00", "495f00", "4a5f00", "4c5f00", "4d5f00", "4e5f00", "4f5f00", "505f00", "515f00", "525f00", "535f00", "545f00", "555f00", "575f00", "585f00", "595f00", "5a5f00", "5b5f00", "5c5f00", "5d5f00", "5e5f00", "615f00", "655f00", "685f00", "6c5f00", "6f5f00", "735f00", "765f00", "7a5f00", "7d5f00", "815f00", "845f00", "815200", "702900"] - ], - "GREEN_Orange_red": [ - [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1], - ["005f00", "015f00", "025f00", "035f00", "045f00", "055f00", "065f00", "075f00", "085f00", "095f00", "0b5f00", "0c5f00", "0d5f00", "0e5f00", "0f5f00", "105f00", "115f00", "125f00", "135f00", "145f00", "165f00", "175f00", "185f00", "195f00", "1a5f00", "1b5f00", "1c5f00", "1d5f00", "1e5f00", "1f5f00", "215f00", "225f00", "235f00", "245f00", "255f00", "265f00", "275f00", "285f00", "295f00", "2a5f00", "2c5f00", "2d5f00", "2e5f00", "2f5f00", "305f00", "315f00", "325f00", "335f00", "345f00", "355f00", "375f00", "385f00", "395f00", "3a5f00", "3b5f00", "3c5f00", "3d5f00", "3e5f00", "3f5f00", "415f00", "425f00", "435f00", "445f00", "455f00", "465f00", "475f00", "485f00", "495f00", "4a5f00", "4c5f00", "4d5f00", "4e5f00", "4f5f00", "505f00", "515f00", "525f00", "535f00", "545f00", "555f00", "575f00", "585f00", "595f00", "5a5f00", "5b5f00", "5c5f00", "5d5f00", "5e5f00", "615f00", "655f00", "685f00", "6c5f00", "6f5f00", "735f00", "765f00", "7a5f00", "7d5f00", "815f00", "845f00", "815200", "702900"] - ], - "green_yellow_red": [ - [190, 184, 178, 172, 166, 160], - ["8ae71c", "8ce71c", "8fe71c", "92e71c", "95e71d", "98e71d", "9ae71d", "9de71d", "a0e71e", "a3e71e", "a6e71e", "a8e71e", "abe71f", "aee71f", "b1e71f", "b4e71f", "b6e720", "b9e720", "bce720", "bfe720", "c2e821", "c3e721", "c5e621", "c7e521", "c9e522", "cbe422", "cde322", "cfe222", "d1e223", "d3e123", "d5e023", "d7df23", "d9df24", "dbde24", "dddd24", "dfdc24", "e1dc25", "e3db25", "e5da25", "e7d925", "e9d926", "e9d626", "e9d426", "e9d126", "e9cf27", "e9cc27", "e9ca27", "e9c727", "e9c528", "e9c228", "e9c028", "e9bd28", "e9bb29", "e9b829", "e9b629", "e9b329", "e9b12a", "e9ae2a", "e9ac2a", "e9a92a", "eaa72b", "eaa42b", "eaa22b", "ea9f2b", "ea9d2c", "ea9b2c", "ea982c", "ea962c", "ea942d", "ea912d", "ea8f2d", "ea8d2d", "ea8a2e", "ea882e", "ea862e", "ea832e", "ea812f", "ea7f2f", "ea7c2f", "ea7a2f", "eb7830", "eb7530", "eb7330", "eb7130", "eb6f31", "eb6c31", "eb6a31", "eb6831", "eb6632", "eb6332", "eb6132", "eb5f32", "eb5d33", "eb5a33", 
"eb5833", "eb5633", "eb5434", "eb5134", "eb4f34", "eb4d34", "ec4b35"] - ], - "green_yellow_orange_red": [ - [2, 3, 9, 1], - ["719e07", "739d06", "759c06", "779c06", "799b06", "7b9a05", "7d9a05", "7f9905", "819805", "839805", "859704", "879704", "899604", "8b9504", "8d9504", "8f9403", "919303", "949303", "969203", "989102", "9a9102", "9c9002", "9e9002", "a08f02", "a28e01", "a48e01", "a68d01", "a88c01", "aa8c01", "ac8b00", "ae8a00", "b08a00", "b28900", "b58900", "b58700", "b68501", "b78302", "b78102", "b87f03", "b97d04", "b97b04", "ba7905", "bb7806", "bb7606", "bc7407", "bd7208", "bd7008", "be6e09", "bf6c0a", "bf6a0a", "c0690b", "c1670c", "c1650c", "c2630d", "c3610e", "c35f0e", "c45d0f", "c55b10", "c55a10", "c65811", "c75612", "c75412", "c85213", "c95014", "c94e14", "ca4c15", "cb4b16", "cb4a16", "cc4917", "cc4818", "cd4719", "cd4719", "ce461a", "ce451b", "cf441c", "cf441c", "d0431d", "d0421e", "d1411f", "d1411f", "d24020", "d23f21", "d33e22", "d33e22", "d43d23", "d43c24", "d53b25", "d53b25", "d63a26", "d63927", "d73828", "d73828", "d83729", "d8362a", "d9352b", "d9352b", "da342c", "da332d", "db322e", "dc322f"] - ], - "yellow_red": [ - [220, 178, 172, 166, 160], - ["ffd700", "fdd500", "fbd300", "fad200", "f8d000", "f7cf00", "f5cd00", "f3cb00", "f2ca00", "f0c800", "efc700", "edc500", "ebc300", "eac200", "e8c000", "e7bf00", "e5bd00", "e3bb00", "e2ba00", "e0b800", "dfb700", "ddb500", "dbb300", "dab200", "d8b000", "d7af00", "d7ad00", "d7ab00", "d7aa00", "d7a800", "d7a700", "d7a500", "d7a300", "d7a200", "d7a000", "d79f00", "d79d00", "d79b00", "d79a00", "d79800", "d79700", "d79500", "d79300", "d79200", "d79000", "d78f00", "d78d00", "d78b00", "d78a00", "d78800", "d78700", "d78500", "d78300", "d78200", "d78000", "d77f00", "d77d00", "d77b00", "d77a00", "d77800", "d77700", "d77500", "d77300", "d77200", "d77000", "d76f00", "d76d00", "d76b00", "d76a00", "d76800", "d76700", "d76500", "d76300", "d76200", "d76000", "d75f00", "d75b00", "d75700", "d75300", "d74f00", "d74c00", "d74800", "d74400", "d74000", "d73c00", "d73900", "d73500", "d73100", "d72d00", "d72900", "d72600", "d72200", "d71e00", "d71a00", "d71600", "d71300", "d70f00", "d70b00", "d70700"] - ], - "yellow_orange_red": [ - [3, 9, 1], - ["b58900", "b58700", "b58600", "b68501", "b68401", "b78202", "b78102", "b88003", "b87f03", "b87d03", "b97c04", "b97b04", "ba7a05", "ba7805", "bb7706", "bb7606", "bc7507", "bc7307", "bc7207", "bd7108", "bd7008", "be6e09", "be6d09", "bf6c0a", "bf6b0a", "c06a0b", "c0680b", "c0670b", "c1660c", "c1650c", "c2630d", "c2620d", "c3610e", "c3600e", "c35e0e", "c45d0f", "c45c0f", "c55b10", "c55910", "c65811", "c65711", "c75612", "c75412", "c75312", "c85213", "c85113", "c94f14", "c94e14", "ca4d15", "ca4c15", "cb4b16", "cb4a16", "cb4a17", "cc4917", "cc4918", "cc4818", "cd4819", "cd4719", "cd471a", "ce461a", "ce461b", "ce451b", "cf451c", "cf441c", "cf441d", "d0431d", "d0431e", "d0421e", "d1421f", "d1411f", "d14120", "d24020", "d24021", "d23f21", "d33f22", "d33e22", "d33e23", "d43d23", "d43d24", "d43c24", "d53c25", "d53b25", "d53b26", "d63a26", "d63a27", "d63927", "d73928", "d73828", "d73829", "d83729", "d8372a", "d8362a", "d9362b", "d9352b", "d9352c", "da342c", "da342d", "da332d", "db332e"] - ], - "blue_red": [ - [39, 74, 68, 67, 103, 97, 96, 132, 131, 167, 203, 197], - ["19b4fe", "1bb2fc", "1db1fa", "1faff8", "22aef6", "24adf4", "26abf2", "29aaf0", "2ba9ee", "2da7ec", "30a6ea", "32a5e8", "34a3e6", "36a2e4", "39a0e2", "3b9fe1", "3d9edf", "409cdd", "429bdb", "449ad9", "4798d7", "4997d5", "4b96d3", "4d94d1", "5093cf", "5292cd", 
"5490cb", "578fc9", "598dc7", "5b8cc6", "5e8bc4", "6089c2", "6288c0", "6487be", "6785bc", "6984ba", "6b83b8", "6e81b6", "7080b4", "727eb2", "757db0", "777cae", "797aac", "7b79ab", "7e78a9", "8076a7", "8275a5", "8574a3", "8772a1", "89719f", "8c709d", "8e6e9b", "906d99", "926b97", "956a95", "976993", "996791", "9c668f", "9e658e", "a0638c", "a3628a", "a56188", "a75f86", "a95e84", "ac5c82", "ae5b80", "b05a7e", "b3587c", "b5577a", "b75678", "ba5476", "bc5374", "be5273", "c05071", "c34f6f", "c54e6d", "c74c6b", "ca4b69", "cc4967", "ce4865", "d14763", "d34561", "d5445f", "d7435d", "da415b", "dc4059", "de3f58", "e13d56", "e33c54", "e53a52", "e83950", "ea384e", "ec364c", "ee354a", "f13448", "f33246", "f53144", "f83042", "fa2e40"] - ] - } -} diff --git a/common/.local/lib/python2.7/site-packages/powerline/config_files/colorschemes/ipython/default.json b/common/.local/lib/python2.7/site-packages/powerline/config_files/colorschemes/ipython/default.json deleted file mode 100644 index d787591..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/config_files/colorschemes/ipython/default.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "name": "Default color scheme for IPython prompt", - "groups": { - "virtualenv": { "fg": "white", "bg": "darkcyan" }, - "prompt": { "fg": "gray9", "bg": "gray4" }, - "prompt_count": { "fg": "white", "bg": "gray4" } - } -} diff --git a/common/.local/lib/python2.7/site-packages/powerline/config_files/colorschemes/shell/default.json b/common/.local/lib/python2.7/site-packages/powerline/config_files/colorschemes/shell/default.json deleted file mode 100644 index 639c1f9..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/config_files/colorschemes/shell/default.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "name": "Default color scheme for shell prompts", - "groups": { - "user": { "fg": "white", "bg": "darkblue", "attr": ["bold"] }, - "superuser": { "fg": "white", "bg": "brightred", "attr": ["bold"] }, - "virtualenv": { "fg": "white", "bg": "darkcyan" }, - "branch": { "fg": "gray9", "bg": "gray2" }, - "branch_dirty": { "fg": "brightyellow", "bg": "gray2" }, - "branch_clean": { "fg": "gray9", "bg": "gray2" }, - "cwd": { "fg": "gray9", "bg": "gray4" }, - "cwd:current_folder": { "fg": "gray10", "bg": "gray4", "attr": ["bold"] }, - "cwd:divider": { "fg": "gray7", "bg": "gray4" }, - "hostname": { "fg": "brightyellow", "bg": "mediumorange" }, - "exit_fail": { "fg": "white", "bg": "darkestred" }, - "exit_success": { "fg": "white", "bg": "darkestgreen" } - } -} diff --git a/common/.local/lib/python2.7/site-packages/powerline/config_files/colorschemes/shell/solarized.json b/common/.local/lib/python2.7/site-packages/powerline/config_files/colorschemes/shell/solarized.json deleted file mode 100644 index 5bf8672..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/config_files/colorschemes/shell/solarized.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "name": "Solarized Dark", - "groups": { - "user": { "fg": "oldlace", "bg": "blue", "attr": ["bold"] }, - "superuser": { "fg": "oldlace", "bg": "red", "attr": ["bold"] }, - "virtualenv": { "fg": "oldlace", "bg": "green" }, - "branch": { "fg": "gray61", "bg": "royalblue5" }, - "branch_dirty": { "fg": "yellow", "bg": "royalblue5" }, - "branch_clean": { "fg": "gray61", "bg": "royalblue5" }, - "cwd": { "fg": "lightyellow", "bg": "darkgreencopper" }, - "cwd:current_folder": { "fg": "oldlace", "bg": "darkgreencopper", "attr": ["bold"] }, - "cwd:divider": { "fg": "gray61", "bg": "darkgreencopper" }, - "hostname": { "fg": 
"oldlace", "bg": "darkgreencopper" }, - "exit_fail": { "fg": "oldlace", "bg": "red" }, - "exit_success": { "fg": "oldlace", "bg": "green" } - } -} diff --git a/common/.local/lib/python2.7/site-packages/powerline/config_files/colorschemes/tmux/default.json b/common/.local/lib/python2.7/site-packages/powerline/config_files/colorschemes/tmux/default.json deleted file mode 100644 index 35686d3..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/config_files/colorschemes/tmux/default.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "name": "Default color scheme for terminal prompts", - "groups": { - "background:divider": { "fg": "gray5", "bg": "gray0" }, - "session": { "fg": "black", "bg": "gray10", "attr": ["bold"] }, - "date": { "fg": "gray8", "bg": "gray2" }, - "time": { "fg": "gray10", "bg": "gray2", "attr": ["bold"] }, - "time:divider": { "fg": "gray5", "bg": "gray2" }, - "email_alert": { "fg": "white", "bg": "brightred", "attr": ["bold"] }, - "email_alert_gradient": { "fg": "white", "bg": "yellow_orange_red", "attr": ["bold"] }, - "hostname": { "fg": "black", "bg": "gray10", "attr": ["bold"] }, - "weather": { "fg": "gray8", "bg": "gray0" }, - "weather_temp_gradient": { "fg": "blue_red", "bg": "gray0" }, - "weather_condition_hot": { "fg": "khaki1", "bg": "gray0" }, - "weather_condition_snowy": { "fg": "skyblue1", "bg": "gray0" }, - "weather_condition_rainy": { "fg": "skyblue1", "bg": "gray0" }, - "uptime": { "fg": "gray8", "bg": "gray0" }, - "external_ip": { "fg": "gray8", "bg": "gray0" }, - "network_load": { "fg": "gray8", "bg": "gray0" }, - "network_load_gradient": { "fg": "green_yellow_orange_red", "bg": "gray0" }, - "system_load": { "fg": "gray8", "bg": "gray0" }, - "system_load_gradient": { "fg": "green_yellow_orange_red", "bg": "gray0" } - } -} diff --git a/common/.local/lib/python2.7/site-packages/powerline/config_files/colorschemes/vim/default.json b/common/.local/lib/python2.7/site-packages/powerline/config_files/colorschemes/vim/default.json deleted file mode 100644 index 9116aa0..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/config_files/colorschemes/vim/default.json +++ /dev/null @@ -1,95 +0,0 @@ -{ - "name": "Default color scheme", - "groups": { - "background": { "fg": "white", "bg": "gray2" }, - "background:divider": { "fg": "gray6", "bg": "gray2" }, - "mode": { "fg": "darkestgreen", "bg": "brightgreen", "attr": ["bold"] }, - "modified_indicator": { "fg": "brightyellow", "bg": "gray4", "attr": ["bold"] }, - "paste_indicator": { "fg": "white", "bg": "mediumorange", "attr": ["bold"] }, - "readonly_indicator": { "fg": "brightestred", "bg": "gray4" }, - "branch": { "fg": "gray9", "bg": "gray4" }, - "branch_dirty": { "fg": "brightyellow", "bg": "gray4" }, - "branch_clean": { "fg": "gray9", "bg": "gray4" }, - "branch:divider": { "fg": "gray7", "bg": "gray4" }, - "file_directory": { "fg": "gray9", "bg": "gray4" }, - "file_name": { "fg": "white", "bg": "gray4", "attr": ["bold"] }, - "file_size": { "fg": "gray8", "bg": "gray2" }, - "file_name_no_file": { "fg": "gray9", "bg": "gray4", "attr": ["bold"] }, - "file_name_empty": { "fg": "gray9", "bg": "gray4" }, - "file_format": { "fg": "gray8", "bg": "gray2" }, - "file_encoding": { "fg": "gray8", "bg": "gray2" }, - "file_type": { "fg": "gray8", "bg": "gray2" }, - "file_vcs_status": { "fg": "brightestred", "bg": "gray4" }, - "file_vcs_status_M": { "fg": "brightyellow", "bg": "gray4" }, - "file_vcs_status_A": { "fg": "brightgreen", "bg": "gray4" }, - "line_percent": { "fg": "gray9", "bg": "gray4" }, - 
"line_percent_gradient": { "fg": "green_yellow_red", "bg": "gray4" }, - "line_current": { "fg": "gray1", "bg": "gray10", "attr": ["bold"] }, - "line_current_symbol": { "fg": "gray1", "bg": "gray10" }, - "virtcol_current_gradient": { "fg": "dark_GREEN_Orange_red", "bg": "gray10" }, - "col_current": { "fg": "gray6", "bg": "gray10" }, - "modified_buffers": { "fg": "brightyellow", "bg": "gray2" } - }, - "mode_translations": { - "nc": { - "colors": { - "brightyellow": "darkorange", - "brightestred": "darkred", - "gray0": "gray0", - "gray1": "gray0", - "gray2": "gray0", - "gray3": "gray1", - "gray4": "gray1", - "gray5": "gray1", - "gray6": "gray1", - "gray7": "gray4", - "gray8": "gray4", - "gray9": "gray4", - "gray10": "gray5", - "white": "gray6", - "green_yellow_red": "gray5" - } - }, - "i": { - "colors": { - "gray0": "darkestblue", - "gray1": "darkestblue", - "gray2": "darkestblue", - "gray3": "darkblue", - "gray4": "darkblue", - "gray5": "darkestcyan", - "gray6": "darkestcyan", - "gray7": "darkestcyan", - "gray8": "mediumcyan", - "gray9": "mediumcyan", - "gray10": "mediumcyan", - "green_yellow_red": "gray5" - }, - "groups": { - "mode": { "fg": "darkestcyan", "bg": "white", "attr": ["bold"] }, - "background:divider": { "fg": "darkcyan", "bg": "darkestblue" }, - "branch:divider": { "fg": "darkcyan", "bg": "darkblue" } - } - }, - "v": { - "groups": { - "mode": { "fg": "darkorange", "bg": "brightestorange", "attr": ["bold"] } - } - }, - "V": { - "groups": { - "mode": { "fg": "darkorange", "bg": "brightestorange", "attr": ["bold"] } - } - }, - "^V": { - "groups": { - "mode": { "fg": "darkorange", "bg": "brightestorange", "attr": ["bold"] } - } - }, - "R": { - "groups": { - "mode": { "fg": "white", "bg": "brightred", "attr": ["bold"] } - } - } - } -} diff --git a/common/.local/lib/python2.7/site-packages/powerline/config_files/colorschemes/vim/solarized.json b/common/.local/lib/python2.7/site-packages/powerline/config_files/colorschemes/vim/solarized.json deleted file mode 100644 index 1ae0b80..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/config_files/colorschemes/vim/solarized.json +++ /dev/null @@ -1,92 +0,0 @@ -{ - "name": "Solarized Dark", - "groups": { - "background": { "fg": "oldlace", "bg": "royalblue5" }, - "background:divider": { "fg": "lightskyblue4", "bg": "royalblue5" }, - "mode": { "fg": "oldlace", "bg": "green", "attr": ["bold"] }, - "modified_indicator": { "fg": "yellow", "bg": "darkgreencopper", "attr": ["bold"] }, - "paste_indicator": { "fg": "oldlace", "bg": "orange", "attr": ["bold"] }, - "readonly_indicator": { "fg": "red", "bg": "darkgreencopper" }, - "branch": { "fg": "lightyellow", "bg": "darkgreencopper" }, - "branch_dirty": { "fg": "yellow", "bg": "darkgreencopper" }, - "branch_clean": { "fg": "lightyellow", "bg": "darkgreencopper" }, - "branch:divider": { "fg": "gray61", "bg": "darkgreencopper" }, - "file_directory": { "fg": "lightyellow", "bg": "darkgreencopper" }, - "file_name": { "fg": "oldlace", "bg": "darkgreencopper", "attr": ["bold"] }, - "file_size": { "fg": "oldlace", "bg": "darkgreencopper" }, - "file_name_no_file": { "fg": "oldlace", "bg": "darkgreencopper", "attr": ["bold"] }, - "file_name_empty": { "fg": "oldlace", "bg": "darkgreencopper" }, - "file_format": { "fg": "gray61", "bg": "royalblue5" }, - "file_encoding": { "fg": "gray61", "bg": "royalblue5" }, - "file_type": { "fg": "gray61", "bg": "royalblue5" }, - "file_vcs_status": { "fg": "red", "bg": "darkgreencopper" }, - "file_vcs_status_M": { "fg": "yellow", "bg": "darkgreencopper" }, - 
"file_vcs_status_A": { "fg": "green", "bg": "darkgreencopper" }, - "line_percent": { "fg": "oldlace", "bg": "lightskyblue4" }, - "line_percent_gradient": { "fg": "green_yellow_orange_red", "bg": "lightskyblue4" }, - "line_current": { "fg": "gray13", "bg": "lightyellow", "attr": ["bold"] }, - "line_current_symbol": { "fg": "gray13", "bg": "lightyellow" }, - "virtcol_current_gradient": { "fg": "GREEN_Orange_red", "bg": "gray10" }, - "col_current": { "fg": "azure4", "bg": "lightyellow" } - }, - "mode_translations": { - "nc": { - "colors": { - "darkgreencopper": "royalblue5", - "lightskyblue4": "royalblue5", - "azure4": "darkgreencopper", - "gray61": "lightskyblue4", - "lightyellow": "azure4", - "oldlace": "gray61" - } - }, - "i": { - "groups": { - "background": { "fg": "oldlace", "bg": "darkgreencopper" }, - "background:divider": { "fg": "lightyellow", "bg": "darkgreencopper" }, - "mode": { "fg": "oldlace", "bg": "blue", "attr": ["bold"] }, - "modified_indicator": { "fg": "yellow", "bg": "lightyellow", "attr": ["bold"] }, - "paste_indicator": { "fg": "oldlace", "bg": "orange", "attr": ["bold"] }, - "readonly_indicator": { "fg": "red", "bg": "lightyellow" }, - "branch": { "fg": "darkgreencopper", "bg": "lightyellow" }, - "branch:divider": { "fg": "lightskyblue4", "bg": "lightyellow" }, - "file_directory": { "fg": "darkgreencopper", "bg": "lightyellow" }, - "file_name": { "fg": "royalblue5", "bg": "lightyellow", "attr": ["bold"] }, - "file_size": { "fg": "royalblue5", "bg": "lightyellow" }, - "file_name_no_file": { "fg": "royalblue5", "bg": "lightyellow", "attr": ["bold"] }, - "file_name_empty": { "fg": "royalblue5", "bg": "lightyellow" }, - "file_format": { "fg": "lightyellow", "bg": "darkgreencopper" }, - "file_encoding": { "fg": "lightyellow", "bg": "darkgreencopper" }, - "file_type": { "fg": "lightyellow", "bg": "darkgreencopper" }, - "file_vcs_status": { "fg": "red", "bg": "lightyellow" }, - "file_vcs_status_M": { "fg": "yellow", "bg": "lightyellow" }, - "file_vcs_status_A": { "fg": "green", "bg": "lightyellow" }, - "line_percent": { "fg": "oldlace", "bg": "gray61" }, - "line_percent_gradient": { "fg": "oldlace", "bg": "gray61" }, - "line_current": { "fg": "gray13", "bg": "oldlace", "attr": ["bold"] }, - "line_current_symbol": { "fg": "gray13", "bg": "oldlace" }, - "col_current": { "fg": "azure4", "bg": "oldlace" } - } - }, - "v": { - "groups": { - "mode": { "fg": "oldlace", "bg": "orange", "attr": ["bold"] } - } - }, - "V": { - "groups": { - "mode": { "fg": "oldlace", "bg": "orange", "attr": ["bold"] } - } - }, - "^V": { - "groups": { - "mode": { "fg": "oldlace", "bg": "orange", "attr": ["bold"] } - } - }, - "R": { - "groups": { - "mode": { "fg": "oldlace", "bg": "red", "attr": ["bold"] } - } - } - } -} diff --git a/common/.local/lib/python2.7/site-packages/powerline/config_files/colorschemes/wm/default.json b/common/.local/lib/python2.7/site-packages/powerline/config_files/colorschemes/wm/default.json deleted file mode 100644 index d71d4e3..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/config_files/colorschemes/wm/default.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "name": "Default color scheme for window managers", - "groups": { - "background:divider": { "fg": "gray5", "bg": "gray0" }, - "session": { "fg": "black", "bg": "gray10", "attr": ["bold"] }, - "date": { "fg": "gray8", "bg": "gray2" }, - "time": { "fg": "gray10", "bg": "gray2", "attr": ["bold"] }, - "time:divider": { "fg": "gray5", "bg": "gray2" }, - "email_alert": { "fg": "white", "bg": "brightred", "attr": 
["bold"] }, - "email_alert_gradient": { "fg": "white", "bg": "yellow_orange_red", "attr": ["bold"] }, - "hostname": { "fg": "black", "bg": "gray10", "attr": ["bold"] }, - "weather": { "fg": "gray8", "bg": "gray0" }, - "weather_temp_gradient": { "fg": "blue_red", "bg": "gray0" }, - "weather_condition_hot": { "fg": "khaki1", "bg": "gray0" }, - "weather_condition_snowy": { "fg": "skyblue1", "bg": "gray0" }, - "weather_condition_rainy": { "fg": "skyblue1", "bg": "gray0" }, - "uptime": { "fg": "gray8", "bg": "gray0" }, - "external_ip": { "fg": "gray8", "bg": "gray0" }, - "network_load": { "fg": "gray8", "bg": "gray0" }, - "system_load": { "fg": "gray8", "bg": "gray0" }, - "system_load_good": { "fg": "lightyellowgreen", "bg": "gray0" }, - "system_load_bad": { "fg": "gold3", "bg": "gray0" }, - "system_load_ugly": { "fg": "orangered", "bg": "gray0" } - } -} diff --git a/common/.local/lib/python2.7/site-packages/powerline/config_files/config.json b/common/.local/lib/python2.7/site-packages/powerline/config_files/config.json deleted file mode 100644 index a70922f..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/config_files/config.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "common": { - "term_truecolor": false, - "dividers": { - "left": { - "hard": " ", - "soft": " " - }, - "right": { - "hard": " ", - "soft": " " - } - }, - "spaces": 1 - }, - "ext": { - "ipython": { - "colorscheme": "default", - "theme": "in", - "local_themes": { - "rewrite": "rewrite", - "out": "out", - "in2": "in2" - } - }, - "shell": { - "colorscheme": "default", - "theme": "default" - }, - "tmux": { - "colorscheme": "default", - "theme": "default" - }, - "vim": { - "colorscheme": "default", - "theme": "default", - "local_themes": { - "cmdwin": "cmdwin", - "help": "help", - "quickfix": "quickfix" - } - }, - "wm": { - "colorscheme": "default", - "theme": "default" - } - } -} diff --git a/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/ipython/in.json b/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/ipython/in.json deleted file mode 100644 index ac979c5..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/ipython/in.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "default_module": "powerline.segments.common", - "segments": { - "left": [ - { - "name": "virtualenv" - }, - { - "type": "string", - "contents": "In[", - "draw_soft_divider": false, - "highlight_group": ["prompt"] - }, - { - "name": "prompt_count", - "module": "powerline.segments.ipython", - "draw_soft_divider": false - }, - { - "type": "string", - "contents": "]", - "highlight_group": ["prompt"] - } - ] - } -} diff --git a/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/ipython/in2.json b/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/ipython/in2.json deleted file mode 100644 index 601fc9e..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/ipython/in2.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "default_module": "powerline.segments.common", - "segments": { - "left": [ - { - "type": "string", - "contents": "", - "width": "auto", - "highlight_group": ["prompt"] - } - ] - } -} diff --git a/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/ipython/out.json b/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/ipython/out.json deleted file mode 100644 index 11a6323..0000000 --- 
a/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/ipython/out.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "default_module": "powerline.segments.common", - "segments": { - "left": [ - { - "type": "string", - "contents": "Out[", - "draw_soft_divider": false, - "width": "auto", - "align": "r", - "highlight_group": ["prompt"] - }, - { - "name": "prompt_count", - "module": "powerline.segments.ipython", - "draw_soft_divider": false - }, - { - "type": "string", - "contents": "]", - "highlight_group": ["prompt"] - } - ] - } -} diff --git a/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/ipython/rewrite.json b/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/ipython/rewrite.json deleted file mode 100644 index 47d8de0..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/ipython/rewrite.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "segments": { - "left": [ - { - "type": "string", - "contents": "", - "draw_soft_divider": false, - "width": "auto", - "highlight_group": ["prompt"] - }, - { - "name": "prompt_count", - "module": "powerline.segments.ipython", - "draw_soft_divider": false - }, - { - "type": "string", - "contents": ">", - "highlight_group": ["prompt"] - } - ] - } -} diff --git a/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/shell/default.json b/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/shell/default.json deleted file mode 100644 index 6246a96..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/shell/default.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "default_module": "powerline.segments.common", - "segment_data": { - "hostname": { - "before": " ", - "args": { - "only_if_ssh": true - } - }, - "virtualenv": { - "before": "ⓔ " - }, - "branch": { - "before": " " - } - }, - "segments": { - "left": [ - { - "name": "hostname" - }, - { - "name": "user" - }, - { - "name": "virtualenv" - }, - { - "name": "cwd", - "args": { - "dir_limit_depth": 3 - } - } - ], - "right": [ - { - "module": "powerline.segments.shell", - "name": "last_pipe_status" - }, - { - "name": "branch" - } - ] - } -} diff --git a/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/shell/default_leftonly.json b/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/shell/default_leftonly.json deleted file mode 100644 index 16af975..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/shell/default_leftonly.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "default_module": "powerline.segments.common", - "segment_data": { - "hostname": { - "before": " ", - "args": { - "only_if_ssh": true - } - }, - "virtualenv": { - "before": "ⓔ " - }, - "branch": { - "before": " " - } - }, - "segments": { - "left": [ - { - "name": "hostname" - }, - { - "name": "user" - }, - { - "name": "virtualenv" - }, - { - "name": "branch" - }, - { - "name": "cwd", - "args": { - "dir_limit_depth": 3 - } - }, - { - "name": "last_status", - "module": "powerline.segments.shell" - } - ] - } -} diff --git a/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/tmux/default.json b/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/tmux/default.json deleted file mode 100644 index eb5d7e6..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/tmux/default.json +++ /dev/null @@ -1,62 +0,0 @@ -{ - "default_module": "powerline.segments.common", - 
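
The shell themes above and the tmux theme opening here follow the same pattern: a "segment_data" block supplies per-segment defaults ("before", "args"), and the "segments" lists reference segments by name, optionally with their own "args" and a "priority". The helper below is a hypothetical sketch of flattening one such entry into a single dict; the merge order (per-segment values override theme defaults) is an assumption, not the removed powerline.theme code.

def expand_segment(theme, segment):
    """Combine theme-level segment_data defaults with one segment entry."""
    name = segment.get('name', '')
    defaults = theme.get('segment_data', {}).get(name, {})
    args = dict(defaults.get('args', {}))
    args.update(segment.get('args', {}))  # per-segment args take precedence
    return {
        'name': name,
        'before': segment.get('before', defaults.get('before', '')),
        'args': args,
        # Segments without a priority are assumed never to be dropped
        # when the prompt runs out of horizontal space.
        'priority': segment.get('priority'),
    }

theme = {'segment_data': {'hostname': {'before': ' ', 'args': {'only_if_ssh': True}}}}
print(expand_segment(theme, {'name': 'hostname', 'priority': 10}))
# {'name': 'hostname', 'before': ' ', 'args': {'only_if_ssh': True}, 'priority': 10}
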
"segment_data": { - "uptime": { - "before": "⇑ " - }, - "external_ip": { - "before": "ⓦ " - }, - "date": { - "before": "⌚ " - }, - "email_imap_alert": { - "before": "✉ ", - "args": { - "username": "", - "password": "" - } - } - }, - "segments": { - "right": [ - { - "name": "uptime", - "priority": 50 - }, - { - "name": "external_ip", - "priority": 50 - }, - { - "name": "network_load", - "priority": 50 - }, - { - "name": "system_load", - "priority": 50 - }, - { - "name": "weather", - "priority": 50 - }, - { - "name": "date" - }, - { - "name": "date", - "args": { - "format": "%H:%M", - "istime": true - } - }, - { - "name": "email_imap_alert", - "priority": 10 - }, - { - "name": "hostname" - } - ] - } -} diff --git a/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/vim/cmdwin.json b/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/vim/cmdwin.json deleted file mode 100644 index c300d94..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/vim/cmdwin.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "segments": { - "left": [ - { - "type": "string", - "contents": "Command Line", - "highlight_group": ["file_name"] - }, - { - "type": "string", - "highlight_group": ["background"], - "draw_soft_divider": false, - "draw_hard_divider": false, - "width": "auto" - } - ] - } -} diff --git a/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/vim/default.json b/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/vim/default.json deleted file mode 100644 index 29d6a2a..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/vim/default.json +++ /dev/null @@ -1,108 +0,0 @@ -{ - "segment_data": { - "branch": { - "before": " " - }, - "modified_indicator": { - "args": { "text": "+" } - }, - "line_percent": { - "args": { "gradient": true }, - "after": "%" - }, - "line_current_symbol": { - "contents": " " - } - }, - "segments": { - "left": [ - { - "name": "mode", - "exclude_modes": ["nc"] - }, - { - "name": "paste_indicator", - "exclude_modes": ["nc"], - "priority": 10 - }, - { - "name": "branch", - "exclude_modes": ["nc"], - "priority": 30 - }, - { - "name": "readonly_indicator", - "draw_soft_divider": false, - "after": " " - }, - { - "name": "file_directory", - "priority": 40, - "draw_soft_divider": false - }, - { - "name": "file_name", - "draw_soft_divider": false - }, - { - "name": "file_vcs_status", - "before": " ", - "draw_soft_divider": false - }, - { - "name": "modified_indicator", - "before": " " - }, - { - "type": "string", - "highlight_group": ["background"], - "draw_soft_divider": false, - "draw_hard_divider": false, - "width": "auto" - } - ], - "right": [ - { - "name": "file_format", - "draw_soft_divider": false, - "exclude_modes": ["nc"], - "priority": 60 - }, - { - "name": "file_encoding", - "exclude_modes": ["nc"], - "priority": 60 - }, - { - "name": "file_type", - "exclude_modes": ["nc"], - "priority": 60 - }, - { - "name": "line_percent", - "priority": 50, - "width": 4, - "align": "r" - }, - { - "type": "string", - "name": "line_current_symbol", - "highlight_group": ["line_current_symbol", "line_current"] - }, - { - "name": "line_current", - "draw_soft_divider": false, - "width": 3, - "align": "r" - }, - { - "name": "virtcol_current", - "draw_soft_divider": false, - "priority": 20, - "before": ":", - "width": 3, - "align": "l" - } - ] - } -} diff --git a/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/vim/help.json 
b/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/vim/help.json deleted file mode 100644 index 7407105..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/vim/help.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "segments": { - "left": [ - { - "name": "file_name", - "draw_soft_divider": false - }, - { - "type": "string", - "highlight_group": ["background"], - "draw_soft_divider": false, - "draw_hard_divider": false, - "width": "auto" - } - ], - "right": [ - { - "name": "line_percent", - "priority": 30, - "width": 4, - "align": "r" - }, - { - "type": "string", - "name": "line_current_symbol", - "highlight_group": ["line_current_symbol", "line_current"] - }, - { - "name": "line_current", - "draw_soft_divider": false, - "width": 3, - "align": "r" - } - ] - } -} diff --git a/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/vim/quickfix.json b/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/vim/quickfix.json deleted file mode 100644 index da77d63..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/vim/quickfix.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "segment_data": { - "buffer_name": { - "contents": "Location List" - } - }, - "segments": { - "left": [ - { - "type": "string", - "name": "buffer_name", - "highlight_group": ["file_name"], - "draw_soft_divider": false - }, - { - "type": "string", - "highlight_group": ["background"], - "draw_soft_divider": false, - "draw_hard_divider": false, - "width": "auto" - } - ], - "right": [ - { - "type": "string", - "name": "line_current_symbol", - "highlight_group": ["line_current_symbol", "line_current"] - }, - { - "name": "line_current", - "draw_soft_divider": false, - "width": 3, - "align": "r" - } - ] - } -} diff --git a/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/wm/default.json b/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/wm/default.json deleted file mode 100644 index c1cee4b..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/config_files/themes/wm/default.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "default_module": "powerline.segments.common", - "segments": { - "right": [ - { - "name": "weather", - "priority": 50 - }, - { - "name": "date" - }, - { - "name": "date", - "args": { - "format": "%H:%M", - "istime": true - }, - "before": "⌚ " - }, - { - "name": "email_imap_alert", - "before": "✉ ", - "priority": 10, - "args": { - "username": "", - "password": "" - } - } - ] - } -} diff --git a/common/.local/lib/python2.7/site-packages/powerline/ipython.py b/common/.local/lib/python2.7/site-packages/powerline/ipython.py deleted file mode 100644 index bed4151..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/ipython.py +++ /dev/null @@ -1,30 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from powerline import Powerline -from powerline.lib import mergedicts - - -class IpythonPowerline(Powerline): - def __init__(self): - super(IpythonPowerline, self).__init__('ipython', use_daemon_threads=True) - - def get_config_paths(self): - if self.path: - return [self.path] - else: - return super(IpythonPowerline, self).get_config_paths() - - def get_local_themes(self, local_themes): - return dict(((type, {'config': self.load_theme_config(name)}) for type, name in local_themes.items())) - - def load_main_config(self): - r = super(IpythonPowerline, self).load_main_config() - if self.config_overrides: - mergedicts(r, self.config_overrides) - return r - - def 
load_theme_config(self, name): - r = super(IpythonPowerline, self).load_theme_config(name) - if name in self.theme_overrides: - mergedicts(r, self.theme_overrides[name]) - return r diff --git a/common/.local/lib/python2.7/site-packages/powerline/lib/__init__.py b/common/.local/lib/python2.7/site-packages/powerline/lib/__init__.py deleted file mode 100644 index 8f6a7ba..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lib/__init__.py +++ /dev/null @@ -1,67 +0,0 @@ -# vim:fileencoding=utf-8:noet -from functools import wraps -import json - - -def wraps_saveargs(wrapped): - def dec(wrapper): - r = wraps(wrapped)(wrapper) - r.powerline_origin = getattr(wrapped, 'powerline_origin', wrapped) - return r - return dec - - -def mergedicts(d1, d2): - '''Recursively merge two dictionaries. First dictionary is modified in-place. - ''' - for k in d2: - if k in d1 and type(d1[k]) is dict and type(d2[k]) is dict: - mergedicts(d1[k], d2[k]) - else: - d1[k] = d2[k] - - -def add_divider_highlight_group(highlight_group): - def dec(func): - @wraps_saveargs(func) - def f(**kwargs): - r = func(**kwargs) - if r: - return [{ - 'contents': r, - 'divider_highlight_group': highlight_group, - }] - else: - return None - return f - return dec - - -def keyvaluesplit(s): - if '=' not in s: - raise TypeError('Option must look like option=json_value') - if s[0] == '_': - raise ValueError('Option names must not start with `_\'') - idx = s.index('=') - o = s[:idx] - val = json.loads(s[idx + 1:]) - return (o, val) - - -def parsedotval(s): - if type(s) is tuple: - o, val = s - else: - o, val = keyvaluesplit(s) - - keys = o.split('.') - if len(keys) > 1: - r = (keys[0], {}) - rcur = r[1] - for key in keys[1:-1]: - rcur[key] = {} - rcur = rcur[key] - rcur[keys[-1]] = val - return r - else: - return (o, val) diff --git a/common/.local/lib/python2.7/site-packages/powerline/lib/config.py b/common/.local/lib/python2.7/site-packages/powerline/lib/config.py deleted file mode 100644 index b3ef57b..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lib/config.py +++ /dev/null @@ -1,156 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from powerline.lib.threaded import MultiRunnedThread -from powerline.lib.file_watcher import create_file_watcher - -from threading import Event, Lock -from collections import defaultdict - -import json - - -def open_file(path): - return open(path, 'r') - - -def load_json_config(config_file_path, load=json.load, open_file=open_file): - with open_file(config_file_path) as config_file_fp: - return load(config_file_fp) - - -class ConfigLoader(MultiRunnedThread): - def __init__(self, shutdown_event=None, watcher=None, load=load_json_config): - super(ConfigLoader, self).__init__() - self.shutdown_event = shutdown_event or Event() - self.watcher = watcher or create_file_watcher() - self._load = load - - self.pl = None - self.interval = None - - self.lock = Lock() - - self.watched = defaultdict(set) - self.missing = defaultdict(set) - self.loaded = {} - - def set_pl(self, pl): - self.pl = pl - - def set_interval(self, interval): - self.interval = interval - - def register(self, function, path): - '''Register function that will be run when file changes. - - :param function function: - Function that will be called when file at the given path changes. - :param str path: - Path that will be watched for. 
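
(For context on the helpers in powerline/lib/__init__.py above: parsedotval turns a dotted "option=json_value" string into a nested key/value pair, and mergedicts folds such overrides into an existing config dict in place. A small usage sketch, assuming the powerline package these files came from is still importable; the option names are made up.)

from powerline.lib import mergedicts, parsedotval

key, override = parsedotval('ext.vim.theme="custom"')
print(key, override)   # ext {'vim': {'theme': 'custom'}}

config = {'ext': {'vim': {'colorscheme': 'default'}}}
mergedicts(config, {key: override})   # modifies config in place
print(config)          # {'ext': {'vim': {'colorscheme': 'default', 'theme': 'custom'}}}
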
- ''' - with self.lock: - self.watched[path].add(function) - self.watcher.watch(path) - - def register_missing(self, condition_function, function, key): - '''Register any function that will be called with given key each - interval seconds (interval is defined at __init__). Its result is then - passed to ``function``, but only if the result is true. - - :param function condition_function: - Function which will be called each ``interval`` seconds. All - exceptions from it will be ignored. - :param function function: - Function which will be called if condition_function returns - something that is true. Accepts result of condition_function as an - argument. - :param str key: - Any value, it will be passed to condition_function on each call. - - Note: registered functions will be automatically removed if - condition_function results in something true. - ''' - with self.lock: - self.missing[key].add((condition_function, function)) - - def unregister_functions(self, removed_functions): - '''Unregister files handled by these functions. - - :param set removed_functions: - Set of functions previously passed to ``.register()`` method. - ''' - with self.lock: - for path, functions in list(self.watched.items()): - functions -= removed_functions - if not functions: - self.watched.pop(path) - self.loaded.pop(path, None) - - def unregister_missing(self, removed_functions): - '''Unregister files handled by these functions. - - :param set removed_functions: - Set of pairs (2-tuples) representing ``(condition_function, - function)`` function pairs previously passed as an arguments to - ``.register_missing()`` method. - ''' - with self.lock: - for key, functions in list(self.missing.items()): - functions -= removed_functions - if not functions: - self.missing.pop(key) - - def load(self, path): - try: - # No locks: GIL does what we need - return self.loaded[path] - except KeyError: - r = self._load(path) - self.loaded[path] = r - return r - - def update(self): - toload = [] - with self.lock: - for path, functions in self.watched.items(): - for function in functions: - try: - modified = self.watcher(path) - except OSError as e: - modified = True - self.exception('Error while running watcher for path {0}: {1}', path, str(e)) - else: - if modified: - toload.append(path) - if modified: - function(path) - with self.lock: - for key, functions in list(self.missing.items()): - for condition_function, function in list(functions): - try: - path = condition_function(key) - except Exception as e: - self.exception('Error while running condition function for key {0}: {1}', key, str(e)) - else: - if path: - toload.append(path) - function(path) - functions.remove((condition_function, function)) - if not functions: - self.missing.pop(key) - for path in toload: - try: - self.loaded[path] = self._load(path) - except Exception as e: - self.exception('Error while loading {0}: {1}', path, str(e)) - - def run(self): - while self.interval is not None and not self.shutdown_event.is_set(): - self.update() - self.shutdown_event.wait(self.interval) - - def exception(self, msg, *args, **kwargs): - if self.pl: - self.pl.exception(msg, prefix='config_loader', *args, **kwargs) - else: - raise diff --git a/common/.local/lib/python2.7/site-packages/powerline/lib/file_watcher.py b/common/.local/lib/python2.7/site-packages/powerline/lib/file_watcher.py deleted file mode 100644 index d2874c8..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lib/file_watcher.py +++ /dev/null @@ -1,181 +0,0 @@ -# vim:fileencoding=utf-8:noet -from 
__future__ import unicode_literals, absolute_import - -__copyright__ = '2013, Kovid Goyal ' -__docformat__ = 'restructuredtext en' - -import os -import sys -from time import sleep -from threading import RLock - -from powerline.lib.monotonic import monotonic -from powerline.lib.inotify import INotify, INotifyError - - -class INotifyWatch(INotify): - is_stat_based = False - - def __init__(self, expire_time=10): - super(INotifyWatch, self).__init__() - self.watches = {} - self.modified = {} - self.last_query = {} - self.lock = RLock() - self.expire_time = expire_time * 60 - - def expire_watches(self): - now = monotonic() - for path, last_query in tuple(self.last_query.items()): - if last_query - now > self.expire_time: - self.unwatch(path) - - def process_event(self, wd, mask, cookie, name): - if wd == -1 and (mask & self.Q_OVERFLOW): - # We missed some INOTIFY events, so we dont - # know the state of any tracked files. - for path in tuple(self.modified): - if os.path.exists(path): - self.modified[path] = True - else: - self.watches.pop(path, None) - self.modified.pop(path, None) - self.last_query.pop(path, None) - return - - for path, num in tuple(self.watches.items()): - if num == wd: - if mask & self.IGNORED: - self.watches.pop(path, None) - self.modified.pop(path, None) - self.last_query.pop(path, None) - else: - self.modified[path] = True - - def unwatch(self, path): - ''' Remove the watch for path. Raises an OSError if removing the watch - fails for some reason. ''' - path = self.os.path.abspath(path) - with self.lock: - self.modified.pop(path, None) - self.last_query.pop(path, None) - wd = self.watches.pop(path, None) - if wd is not None: - if self._rm_watch(self._inotify_fd, wd) != 0: - self.handle_error() - - def watch(self, path): - ''' Register a watch for the file named path. Raises an OSError if path - does not exist. ''' - import ctypes - path = self.os.path.abspath(path) - with self.lock: - if path not in self.watches: - bpath = path if isinstance(path, bytes) else path.encode(self.fenc) - wd = self._add_watch(self._inotify_fd, ctypes.c_char_p(bpath), - self.MODIFY | self.ATTRIB | self.MOVE_SELF | self.DELETE_SELF) - if wd == -1: - self.handle_error() - self.watches[path] = wd - self.modified[path] = False - - def __call__(self, path): - ''' Return True if path has been modified since the last call. Can - raise OSError if the path does not exist. 
''' - path = self.os.path.abspath(path) - with self.lock: - self.last_query[path] = monotonic() - self.expire_watches() - if path not in self.watches: - # Try to re-add the watch, it will fail if the file does not - # exist/you dont have permission - self.watch(path) - return True - self.read(get_name=False) - if path not in self.modified: - # An ignored event was received which means the path has been - # automatically unwatched - return True - ans = self.modified[path] - if ans: - self.modified[path] = False - return ans - - def close(self): - with self.lock: - for path in tuple(self.watches): - try: - self.unwatch(path) - except OSError: - pass - super(INotifyWatch, self).close() - - -class StatWatch(object): - is_stat_based = True - - def __init__(self): - self.watches = {} - self.lock = RLock() - - def watch(self, path): - path = os.path.abspath(path) - with self.lock: - self.watches[path] = os.path.getmtime(path) - - def unwatch(self, path): - path = os.path.abspath(path) - with self.lock: - self.watches.pop(path, None) - - def __call__(self, path): - path = os.path.abspath(path) - with self.lock: - if path not in self.watches: - self.watches[path] = os.path.getmtime(path) - return True - mtime = os.path.getmtime(path) - if mtime != self.watches[path]: - self.watches[path] = mtime - return True - return False - - def close(self): - with self.lock: - self.watches.clear() - - -def create_file_watcher(use_stat=False, expire_time=10): - ''' - Create an object that can watch for changes to specified files. To use: - - watcher = create_file_watcher() - watcher(path1) # Will return True if path1 has changed since the last time this was called. Always returns True the first time. - watcher.unwatch(path1) - - Uses inotify if available, otherwise tracks mtimes. expire_time is the - number of minutes after the last query for a given path for the inotify - watch for that path to be automatically removed. This conserves kernel - resources. - ''' - if use_stat: - return StatWatch() - try: - return INotifyWatch(expire_time=expire_time) - except INotifyError: - pass - return StatWatch() - -if __name__ == '__main__': - watcher = create_file_watcher() - print ('Using watcher: %s' % watcher.__class__.__name__) - print ('Watching %s, press Ctrl-C to quit' % sys.argv[-1]) - watcher.watch(sys.argv[-1]) - try: - while True: - if watcher(sys.argv[-1]): - print ('%s has changed' % sys.argv[-1]) - sleep(1) - except KeyboardInterrupt: - pass - watcher.close() diff --git a/common/.local/lib/python2.7/site-packages/powerline/lib/humanize_bytes.py b/common/.local/lib/python2.7/site-packages/powerline/lib/humanize_bytes.py deleted file mode 100644 index 769c7d1..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lib/humanize_bytes.py +++ /dev/null @@ -1,22 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from math import log -unit_list = tuple(zip(['', 'k', 'M', 'G', 'T', 'P'], [0, 0, 1, 2, 2, 2])) - - -def humanize_bytes(num, suffix='B', si_prefix=False): - '''Return a human friendly byte representation. 
- - Modified version from http://stackoverflow.com/questions/1094841 - ''' - if num == 0: - return '0 ' + suffix - div = 1000 if si_prefix else 1024 - exponent = min(int(log(num, div)) if num else 0, len(unit_list) - 1) - quotient = float(num) / div ** exponent - unit, decimals = unit_list[exponent] - if unit and not si_prefix: - unit = unit.upper() + 'i' - return '{{quotient:.{decimals}f}} {{unit}}{{suffix}}'\ - .format(decimals=decimals)\ - .format(quotient=quotient, unit=unit, suffix=suffix) diff --git a/common/.local/lib/python2.7/site-packages/powerline/lib/inotify.py b/common/.local/lib/python2.7/site-packages/powerline/lib/inotify.py deleted file mode 100644 index 9f247bc..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lib/inotify.py +++ /dev/null @@ -1,178 +0,0 @@ -# vim:fileencoding=utf-8:noet -from __future__ import unicode_literals, absolute_import - -__copyright__ = '2013, Kovid Goyal ' -__docformat__ = 'restructuredtext en' - -import sys -import os -import errno - - -class INotifyError(Exception): - pass - - -_inotify = None - - -def load_inotify(): - ''' Initialize the inotify library ''' - global _inotify - if _inotify is None: - if hasattr(sys, 'getwindowsversion'): - # On windows abort before loading the C library. Windows has - # multiple, incompatible C runtimes, and we have no way of knowing - # if the one chosen by ctypes is compatible with the currently - # loaded one. - raise INotifyError('INotify not available on windows') - import ctypes - if not hasattr(ctypes, 'c_ssize_t'): - raise INotifyError('You need python >= 2.7 to use inotify') - from ctypes.util import find_library - name = find_library('c') - if not name: - raise INotifyError('Cannot find C library') - libc = ctypes.CDLL(name, use_errno=True) - for function in ("inotify_add_watch", "inotify_init1", "inotify_rm_watch"): - if not hasattr(libc, function): - raise INotifyError('libc is too old') - # inotify_init1() - prototype = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, use_errno=True) - init1 = prototype(('inotify_init1', libc), ((1, "flags", 0),)) - - # inotify_add_watch() - prototype = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_char_p, ctypes.c_uint32, use_errno=True) - add_watch = prototype(('inotify_add_watch', libc), ( - (1, "fd"), (1, "pathname"), (1, "mask")), use_errno=True) - - # inotify_rm_watch() - prototype = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_int, use_errno=True) - rm_watch = prototype(('inotify_rm_watch', libc), ( - (1, "fd"), (1, "wd")), use_errno=True) - - # read() - prototype = ctypes.CFUNCTYPE(ctypes.c_ssize_t, ctypes.c_int, ctypes.c_void_p, ctypes.c_size_t, use_errno=True) - read = prototype(('read', libc), ( - (1, "fd"), (1, "buf"), (1, "count")), use_errno=True) - _inotify = (init1, add_watch, rm_watch, read) - return _inotify - - -class INotify(object): - # See for the flags defined below - - # Supported events suitable for MASK parameter of INOTIFY_ADD_WATCH. - ACCESS = 0x00000001 # File was accessed. - MODIFY = 0x00000002 # File was modified. - ATTRIB = 0x00000004 # Metadata changed. - CLOSE_WRITE = 0x00000008 # Writtable file was closed. - CLOSE_NOWRITE = 0x00000010 # Unwrittable file closed. - OPEN = 0x00000020 # File was opened. - MOVED_FROM = 0x00000040 # File was moved from X. - MOVED_TO = 0x00000080 # File was moved to Y. - CREATE = 0x00000100 # Subfile was created. - DELETE = 0x00000200 # Subfile was deleted. - DELETE_SELF = 0x00000400 # Self was deleted. - MOVE_SELF = 0x00000800 # Self was moved. 
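
(The constants above are plain bit flags; the per-file watcher earlier in this diff subscribes with MODIFY | ATTRIB | MOVE_SELF | DELETE_SELF, and event handling tests masks bitwise. A tiny standalone illustration, with the values copied from the class definitions above:)

MODIFY = 0x00000002       # File was modified.
ATTRIB = 0x00000004       # Metadata changed.
DELETE_SELF = 0x00000400  # Self was deleted.
MOVE_SELF = 0x00000800    # Self was moved.

watch_mask = MODIFY | ATTRIB | MOVE_SELF | DELETE_SELF
print(hex(watch_mask))    # 0xc06

incoming = MODIFY         # pretend the kernel reported a modification
if incoming & watch_mask:
    print('path should be marked as modified')
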
- - # Events sent by the kernel. - UNMOUNT = 0x00002000 # Backing fs was unmounted. - Q_OVERFLOW = 0x00004000 # Event queued overflowed. - IGNORED = 0x00008000 # File was ignored. - - # Helper events. - CLOSE = (CLOSE_WRITE | CLOSE_NOWRITE) # Close. - MOVE = (MOVED_FROM | MOVED_TO) # Moves. - - # Special flags. - ONLYDIR = 0x01000000 # Only watch the path if it is a directory. - DONT_FOLLOW = 0x02000000 # Do not follow a sym link. - EXCL_UNLINK = 0x04000000 # Exclude events on unlinked objects. - MASK_ADD = 0x20000000 # Add to the mask of an already existing watch. - ISDIR = 0x40000000 # Event occurred against dir. - ONESHOT = 0x80000000 # Only send event once. - - # All events which a program can wait on. - ALL_EVENTS = (ACCESS | MODIFY | ATTRIB | CLOSE_WRITE | CLOSE_NOWRITE | - OPEN | MOVED_FROM | MOVED_TO | CREATE | DELETE | - DELETE_SELF | MOVE_SELF) - - # See - CLOEXEC = 0x80000 - NONBLOCK = 0x800 - - def __init__(self, cloexec=True, nonblock=True): - import ctypes - import struct - self._init1, self._add_watch, self._rm_watch, self._read = load_inotify() - flags = 0 - if cloexec: - flags |= self.CLOEXEC - if nonblock: - flags |= self.NONBLOCK - self._inotify_fd = self._init1(flags) - if self._inotify_fd == -1: - raise INotifyError(os.strerror(ctypes.get_errno())) - - self._buf = ctypes.create_string_buffer(5000) - self.fenc = sys.getfilesystemencoding() or 'utf-8' - self.hdr = struct.Struct(b'iIII') - if self.fenc == 'ascii': - self.fenc = 'utf-8' - # We keep a reference to os to prevent it from being deleted - # during interpreter shutdown, which would lead to errors in the - # __del__ method - self.os = os - - def handle_error(self): - import ctypes - eno = ctypes.get_errno() - raise OSError(eno, self.os.strerror(eno)) - - def __del__(self): - # This method can be called during interpreter shutdown, which means we - # must do the absolute minimum here. Note that there could be running - # daemon threads that are trying to call other methods on this object. 
- try: - self.os.close(self._inotify_fd) - except (AttributeError, TypeError): - pass - - def close(self): - if hasattr(self, '_inotify_fd'): - self.os.close(self._inotify_fd) - del self.os - del self._add_watch - del self._rm_watch - del self._inotify_fd - - def read(self, get_name=True): - import ctypes - buf = [] - while True: - num = self._read(self._inotify_fd, self._buf, len(self._buf)) - if num == 0: - break - if num < 0: - en = ctypes.get_errno() - if en == errno.EAGAIN: - break # No more data - if en == errno.EINTR: - continue # Interrupted, try again - raise OSError(en, self.os.strerror(en)) - buf.append(self._buf.raw[:num]) - raw = b''.join(buf) - pos = 0 - lraw = len(raw) - while lraw - pos >= self.hdr.size: - wd, mask, cookie, name_len = self.hdr.unpack_from(raw, pos) - pos += self.hdr.size - name = None - if get_name: - name = raw[pos:pos + name_len].rstrip(b'\0').decode(self.fenc) - pos += name_len - self.process_event(wd, mask, cookie, name) - - def process_event(self, *args): - raise NotImplementedError() diff --git a/common/.local/lib/python2.7/site-packages/powerline/lib/memoize.py b/common/.local/lib/python2.7/site-packages/powerline/lib/memoize.py deleted file mode 100644 index e180f30..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lib/memoize.py +++ /dev/null @@ -1,40 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from functools import wraps -from powerline.lib.monotonic import monotonic - - -def default_cache_key(**kwargs): - return frozenset(kwargs.items()) - - -class memoize(object): - '''Memoization decorator with timeout.''' - def __init__(self, timeout, cache_key=default_cache_key, cache_reg_func=None): - self.timeout = timeout - self.cache_key = cache_key - self.cache = {} - self.cache_reg_func = cache_reg_func - - def __call__(self, func): - @wraps(func) - def decorated_function(**kwargs): - if self.cache_reg_func: - self.cache_reg_func(self.cache) - self.cache_reg_func = None - - key = self.cache_key(**kwargs) - try: - cached = self.cache.get(key, None) - except TypeError: - return func(**kwargs) - # Handle case when time() appears to be less then cached['time'] due - # to clock updates. Not applicable for monotonic clock, but this - # case is currently rare. 
- if cached is None or not (cached['time'] < monotonic() < cached['time'] + self.timeout): - cached = self.cache[key] = { - 'result': func(**kwargs), - 'time': monotonic(), - } - return cached['result'] - return decorated_function diff --git a/common/.local/lib/python2.7/site-packages/powerline/lib/monotonic.py b/common/.local/lib/python2.7/site-packages/powerline/lib/monotonic.py deleted file mode 100644 index 93f147f..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lib/monotonic.py +++ /dev/null @@ -1,103 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from __future__ import division, absolute_import - -try: - try: - # >=python-3.3, Unix - from time import clock_gettime - try: - # >={kernel}-sources-2.6.28 - from time import CLOCK_MONOTONIC_RAW as CLOCK_ID - except ImportError: - from time import CLOCK_MONOTONIC as CLOCK_ID # NOQA - - monotonic = lambda: clock_gettime(CLOCK_ID) - - except ImportError: - # >=python-3.3 - from time import monotonic # NOQA - -except ImportError: - import ctypes - import sys - - try: - if sys.platform == 'win32': - # Windows only - GetTickCount64 = ctypes.windll.kernel32.GetTickCount64 - GetTickCount64.restype = ctypes.c_ulonglong - - def monotonic(): # NOQA - return GetTickCount64() / 1000 - - elif sys.platform == 'darwin': - # Mac OS X - from ctypes.util import find_library - - libc_name = find_library('c') - if not libc_name: - raise OSError - - libc = ctypes.CDLL(libc_name, use_errno=True) - - mach_absolute_time = libc.mach_absolute_time - mach_absolute_time.argtypes = () - mach_absolute_time.restype = ctypes.c_uint64 - - class mach_timebase_info_data_t(ctypes.Structure): - _fields_ = ( - ('numer', ctypes.c_uint32), - ('denom', ctypes.c_uint32), - ) - mach_timebase_info_data_p = ctypes.POINTER(mach_timebase_info_data_t) - - _mach_timebase_info = libc.mach_timebase_info - _mach_timebase_info.argtypes = (mach_timebase_info_data_p,) - _mach_timebase_info.restype = ctypes.c_int - - def mach_timebase_info(): - timebase = mach_timebase_info_data_t() - _mach_timebase_info(ctypes.byref(timebase)) - return (timebase.numer, timebase.denom) - - timebase = mach_timebase_info() - factor = timebase[0] / timebase[1] * 1e-9 - - def monotonic(): # NOQA - return mach_absolute_time() * factor - else: - # linux only (no librt on OS X) - import os - - # See - CLOCK_MONOTONIC = 1 - CLOCK_MONOTONIC_RAW = 4 - - class timespec(ctypes.Structure): - _fields_ = ( - ('tv_sec', ctypes.c_long), - ('tv_nsec', ctypes.c_long) - ) - tspec = timespec() - - librt = ctypes.CDLL('librt.so.1', use_errno=True) - clock_gettime = librt.clock_gettime - clock_gettime.argtypes = [ctypes.c_int, ctypes.POINTER(timespec)] - - if clock_gettime(CLOCK_MONOTONIC_RAW, ctypes.pointer(tspec)) == 0: - # >={kernel}-sources-2.6.28 - clock_id = CLOCK_MONOTONIC_RAW - elif clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(tspec)) == 0: - clock_id = CLOCK_MONOTONIC - else: - raise OSError - - def monotonic(): # NOQA - if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(tspec)) != 0: - errno_ = ctypes.get_errno() - raise OSError(errno_, os.strerror(errno_)) - return tspec.tv_sec + tspec.tv_nsec / 1e9 - - except: - from time import time as monotonic # NOQA diff --git a/common/.local/lib/python2.7/site-packages/powerline/lib/threaded.py b/common/.local/lib/python2.7/site-packages/powerline/lib/threaded.py deleted file mode 100644 index 7ceea79..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lib/threaded.py +++ /dev/null @@ -1,204 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from __future__ import 
absolute_import - -from powerline.lib.monotonic import monotonic - -from threading import Thread, Lock, Event - - -class MultiRunnedThread(object): - daemon = True - - def __init__(self): - self.thread = None - - def is_alive(self): - return self.thread and self.thread.is_alive() - - def start(self): - self.shutdown_event.clear() - self.thread = Thread(target=self.run) - self.thread.daemon = self.daemon - self.thread.start() - - def join(self, *args, **kwargs): - if self.thread: - return self.thread.join(*args, **kwargs) - return None - - -class ThreadedSegment(MultiRunnedThread): - min_sleep_time = 0.1 - update_first = True - interval = 1 - daemon = False - - def __init__(self): - super(ThreadedSegment, self).__init__() - self.run_once = True - self.skip = False - self.crashed_value = None - self.update_value = None - self.updated = False - - def __call__(self, pl, update_first=True, **kwargs): - if self.run_once: - self.pl = pl - self.set_state(**kwargs) - update_value = self.get_update_value(True) - elif not self.is_alive(): - # Without this we will not have to wait long until receiving bug “I - # opened vim, but branch information is only shown after I move - # cursor”. - # - # If running once .update() is called in __call__. - update_value = self.get_update_value(update_first and self.update_first) - self.start() - elif not self.updated: - update_value = self.get_update_value(True) - self.updated = True - else: - update_value = self.update_value - - if self.skip: - return self.crashed_value - - return self.render(update_value, update_first=update_first, pl=pl, **kwargs) - - def get_update_value(self, update=False): - if update: - self.update_value = self.update(self.update_value) - return self.update_value - - def run(self): - while not self.shutdown_event.is_set(): - start_time = monotonic() - try: - self.update_value = self.update(self.update_value) - except Exception as e: - self.exception('Exception while updating: {0}', str(e)) - self.skip = True - except KeyboardInterrupt: - self.warn('Caught keyboard interrupt while updating') - self.skip = True - else: - self.skip = False - self.shutdown_event.wait(max(self.interval - (monotonic() - start_time), self.min_sleep_time)) - - def shutdown(self): - self.shutdown_event.set() - if self.daemon and self.is_alive(): - # Give the worker thread a chance to shutdown, but don't block for - # too long - self.join(0.01) - - def set_interval(self, interval=None): - # Allowing “interval” keyword in configuration. - # Note: Here **kwargs is needed to support foreign data, in subclasses - # it can be seen in a number of places in order to support - # .set_interval(). 
- interval = interval or getattr(self, 'interval') - self.interval = interval - - def set_state(self, interval=None, update_first=True, shutdown_event=None, **kwargs): - self.set_interval(interval) - self.shutdown_event = shutdown_event or Event() - self.updated = self.updated or (not (update_first and self.update_first)) - - def startup(self, pl, **kwargs): - self.run_once = False - self.pl = pl - self.daemon = pl.use_daemon_threads - - self.set_state(**kwargs) - - if not self.is_alive(): - self.start() - - def critical(self, *args, **kwargs): - self.pl.critical(prefix=self.__class__.__name__, *args, **kwargs) - - def exception(self, *args, **kwargs): - self.pl.exception(prefix=self.__class__.__name__, *args, **kwargs) - - def info(self, *args, **kwargs): - self.pl.info(prefix=self.__class__.__name__, *args, **kwargs) - - def error(self, *args, **kwargs): - self.pl.error(prefix=self.__class__.__name__, *args, **kwargs) - - def warn(self, *args, **kwargs): - self.pl.warn(prefix=self.__class__.__name__, *args, **kwargs) - - def debug(self, *args, **kwargs): - self.pl.debug(prefix=self.__class__.__name__, *args, **kwargs) - - -class KwThreadedSegment(ThreadedSegment): - drop_interval = 10 * 60 - update_first = True - - def __init__(self): - super(KwThreadedSegment, self).__init__() - self.updated = True - self.update_value = ({}, set()) - self.write_lock = Lock() - self.new_queries = {} - - @staticmethod - def key(**kwargs): - return frozenset(kwargs.items()) - - def render(self, update_value, update_first, **kwargs): - queries, crashed = update_value - key = self.key(**kwargs) - if key in crashed: - return self.crashed_value - - try: - update_state = queries[key][1] - except KeyError: - # Allow only to forbid to compute missing values: in either user - # configuration or in subclasses. 
- update_state = self.compute_state(key) if ((update_first and self.update_first) or self.run_once) else None - - with self.write_lock: - self.new_queries[key] = (monotonic(), update_state) - return self.render_one(update_state, **kwargs) - - def update(self, old_update_value): - updates = {} - crashed = set() - update_value = (updates, crashed) - queries = old_update_value[0] - with self.write_lock: - if self.new_queries: - queries.update(self.new_queries) - self.new_queries.clear() - - for key, (last_query_time, state) in queries.items(): - if last_query_time < monotonic() < last_query_time + self.drop_interval: - try: - updates[key] = (last_query_time, self.compute_state(key)) - except Exception as e: - self.exception('Exception while computing state for {0!r}: {1}', key, str(e)) - crashed.add(key) - except KeyboardInterrupt: - self.warn('Interrupt while computing state for {0!r}', key) - crashed.add(key) - - return update_value - - def set_state(self, interval=None, shutdown_event=None, **kwargs): - self.set_interval(interval) - self.shutdown_event = shutdown_event or Event() - - @staticmethod - def render_one(update_state, **kwargs): - return update_state - - -def with_docstring(instance, doc): - instance.__doc__ = doc - return instance diff --git a/common/.local/lib/python2.7/site-packages/powerline/lib/tree_watcher.py b/common/.local/lib/python2.7/site-packages/powerline/lib/tree_watcher.py deleted file mode 100644 index c856889..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lib/tree_watcher.py +++ /dev/null @@ -1,199 +0,0 @@ -# vim:fileencoding=utf-8:noet -from __future__ import (unicode_literals, absolute_import, print_function) - -__copyright__ = '2013, Kovid Goyal ' -__docformat__ = 'restructuredtext en' - -import sys -import os -import errno -from time import sleep -from powerline.lib.monotonic import monotonic - -from powerline.lib.inotify import INotify, INotifyError - - -class NoSuchDir(ValueError): - pass - - -class DirTooLarge(ValueError): - def __init__(self, bdir): - ValueError.__init__(self, 'The directory {0} is too large to monitor. Try increasing the value in /proc/sys/fs/inotify/max_user_watches'.format(bdir)) - - -class INotifyTreeWatcher(INotify): - is_dummy = False - - def __init__(self, basedir): - super(INotifyTreeWatcher, self).__init__() - self.basedir = os.path.abspath(basedir) - self.watch_tree() - self.modified = True - - def watch_tree(self): - self.watched_dirs = {} - self.watched_rmap = {} - try: - self.add_watches(self.basedir) - except OSError as e: - if e.errno == errno.ENOSPC: - raise DirTooLarge(self.basedir) - - def add_watches(self, base, top_level=True): - ''' Add watches for this directory and all its descendant directories, - recursively. ''' - base = os.path.abspath(base) - try: - is_dir = self.add_watch(base) - except OSError as e: - if e.errno == errno.ENOENT: - # The entry could have been deleted between listdir() and - # add_watch(). 
- if top_level: - raise NoSuchDir('The dir {0} does not exist'.format(base)) - return - if e.errno == errno.EACCES: - # We silently ignore entries for which we dont have permission, - # unless they are the top level dir - if top_level: - raise NoSuchDir('You do not have permission to monitor {0}'.format(base)) - return - raise - else: - if is_dir: - try: - files = os.listdir(base) - except OSError as e: - if e.errno in (errno.ENOTDIR, errno.ENOENT): - # The dir was deleted/replaced between the add_watch() - # and listdir() - if top_level: - raise NoSuchDir('The dir {0} does not exist'.format(base)) - return - raise - for x in files: - self.add_watches(os.path.join(base, x), top_level=False) - elif top_level: - # The top level dir is a file, not good. - raise NoSuchDir('The dir {0} does not exist'.format(base)) - - def add_watch(self, path): - import ctypes - bpath = path if isinstance(path, bytes) else path.encode(self.fenc) - wd = self._add_watch(self._inotify_fd, ctypes.c_char_p(bpath), - # Ignore symlinks and watch only directories - self.DONT_FOLLOW | self.ONLYDIR | - - self.MODIFY | self.CREATE | self.DELETE | - self.MOVE_SELF | self.MOVED_FROM | self.MOVED_TO | - self.ATTRIB | self.MOVE_SELF | self.DELETE_SELF) - if wd == -1: - eno = ctypes.get_errno() - if eno == errno.ENOTDIR: - return False - raise OSError(eno, 'Failed to add watch for: {0}: {1}'.format(path, self.os.strerror(eno))) - self.watched_dirs[path] = wd - self.watched_rmap[wd] = path - return True - - def process_event(self, wd, mask, cookie, name): - if wd == -1 and (mask & self.Q_OVERFLOW): - # We missed some INOTIFY events, so we dont - # know the state of any tracked dirs. - self.watch_tree() - self.modified = True - return - path = self.watched_rmap.get(wd, None) - if path is not None: - self.modified = True - if mask & self.CREATE: - # A new sub-directory might have been created, monitor it. 
- try: - self.add_watch(os.path.join(path, name)) - except OSError as e: - if e.errno == errno.ENOENT: - # Deleted before add_watch() - pass - elif e.errno == errno.ENOSPC: - raise DirTooLarge(self.basedir) - else: - raise - - def __call__(self): - self.read() - ret = self.modified - self.modified = False - return ret - - -class DummyTreeWatcher(object): - is_dummy = True - - def __init__(self, basedir): - self.basedir = os.path.abspath(basedir) - - def __call__(self): - return False - - -class TreeWatcher(object): - def __init__(self, expire_time=10): - self.watches = {} - self.last_query_times = {} - self.expire_time = expire_time * 60 - - def watch(self, path, logger=None): - path = os.path.abspath(path) - try: - w = INotifyTreeWatcher(path) - except (INotifyError, DirTooLarge) as e: - if logger is not None: - logger.warn('Failed to watch path: {0} with error: {1}'.format(path, e)) - w = DummyTreeWatcher(path) - self.watches[path] = w - return w - - def is_actually_watched(self, path): - w = self.watches.get(path, None) - return not getattr(w, 'is_dummy', True) - - def expire_old_queries(self): - pop = [] - now = monotonic() - for path, lt in self.last_query_times.items(): - if now - lt > self.expire_time: - pop.append(path) - for path in pop: - del self.last_query_times[path] - - def __call__(self, path, logger=None): - path = os.path.abspath(path) - self.expire_old_queries() - self.last_query_times[path] = monotonic() - w = self.watches.get(path, None) - if w is None: - try: - self.watch(path) - except NoSuchDir: - pass - return True - try: - return w() - except DirTooLarge as e: - if logger is not None: - logger.warn(str(e)) - self.watches[path] = DummyTreeWatcher(path) - return False - -if __name__ == '__main__': - w = INotifyTreeWatcher(sys.argv[-1]) - w() - print ('Monitoring', sys.argv[-1], 'press Ctrl-C to stop') - try: - while True: - if w(): - print (sys.argv[-1], 'changed') - sleep(1) - except KeyboardInterrupt: - raise SystemExit(0) diff --git a/common/.local/lib/python2.7/site-packages/powerline/lib/url.py b/common/.local/lib/python2.7/site-packages/powerline/lib/url.py deleted file mode 100644 index 6e59934..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lib/url.py +++ /dev/null @@ -1,16 +0,0 @@ -# vim:fileencoding=utf-8:noet - -try: - from urllib.error import HTTPError - from urllib.request import urlopen - from urllib.parse import urlencode as urllib_urlencode # NOQA -except ImportError: - from urllib2 import urlopen, HTTPError # NOQA - from urllib import urlencode as urllib_urlencode # NOQA - - -def urllib_read(url): - try: - return urlopen(url, timeout=10).read().decode('utf-8') - except HTTPError: - return diff --git a/common/.local/lib/python2.7/site-packages/powerline/lib/vcs/__init__.py b/common/.local/lib/python2.7/site-packages/powerline/lib/vcs/__init__.py deleted file mode 100644 index 4b45293..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lib/vcs/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# vim:fileencoding=utf-8:noet -from __future__ import absolute_import -import os - - -vcs_props = ( - ('git', '.git', os.path.exists), - ('mercurial', '.hg', os.path.isdir), - ('bzr', '.bzr', os.path.isdir), -) - - -def generate_directories(path): - yield path - while True: - old_path = path - path = os.path.dirname(path) - if path == old_path or not path: - break - yield path - - -def guess(path): - for directory in generate_directories(path): - for vcs, vcs_dir, check in vcs_props: - if check(os.path.join(directory, vcs_dir)): - try: 
- if vcs not in globals(): - globals()[vcs] = getattr(__import__('powerline.lib.vcs', fromlist=[vcs]), vcs) - return globals()[vcs].Repository(directory) - except: - pass - return None diff --git a/common/.local/lib/python2.7/site-packages/powerline/lib/vcs/bzr.py b/common/.local/lib/python2.7/site-packages/powerline/lib/vcs/bzr.py deleted file mode 100644 index c243836..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lib/vcs/bzr.py +++ /dev/null @@ -1,64 +0,0 @@ -# vim:fileencoding=utf-8:noet -from __future__ import absolute_import, unicode_literals, division, print_function - -import sys -from io import StringIO - -from bzrlib import (branch, workingtree, status, library_state, trace, ui) - - -class CoerceIO(StringIO): - def write(self, arg): - if isinstance(arg, bytes): - arg = arg.decode('utf-8', 'replace') - return super(CoerceIO, self).write(arg) - - -class Repository(object): - def __init__(self, directory): - if isinstance(directory, bytes): - directory = directory.decode(sys.getfilesystemencoding() or sys.getdefaultencoding() or 'utf-8') - self.directory = directory - self.state = library_state.BzrLibraryState(ui=ui.SilentUIFactory, trace=trace.DefaultConfig()) - - def status(self, path=None): - '''Return status of repository or file. - - Without file argument: returns status of the repository: - - :"D?": dirty (tracked modified files: added, removed, deleted, modified), - :"?U": untracked-dirty (added, but not tracked files) - :None: clean (status is empty) - - With file argument: returns status of this file: The status codes are - those returned by bzr status -S - ''' - try: - return self._status(path) - except: - pass - - def _status(self, path): - buf = CoerceIO() - w = workingtree.WorkingTree.open(self.directory) - status.show_tree_status(w, specific_files=[path] if path else None, to_file=buf, short=True) - raw = buf.getvalue() - if not raw.strip(): - return - if path: - return raw[:2] - dirtied = untracked = ' ' - for line in raw.splitlines(): - if len(line) > 1 and line[1] in 'ACDMRIN': - dirtied = 'D' - elif line and line[0] == '?': - untracked = 'U' - ans = dirtied + untracked - return ans if ans.strip() else None - - def branch(self): - try: - b = branch.Branch.open(self.directory) - return b._get_nick(local=True) or None - except: - pass diff --git a/common/.local/lib/python2.7/site-packages/powerline/lib/vcs/git.py b/common/.local/lib/python2.7/site-packages/powerline/lib/vcs/git.py deleted file mode 100644 index 033d893..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lib/vcs/git.py +++ /dev/null @@ -1,143 +0,0 @@ -# vim:fileencoding=utf-8:noet -try: - import pygit2 as git - - class Repository(object): - __slots__ = ('directory') - - def __init__(self, directory): - self.directory = directory - - def _repo(self): - return git.Repository(self.directory) - - def status(self, path=None): - '''Return status of repository or file. - - Without file argument: returns status of the repository: - - :First column: working directory status (D: dirty / space) - :Second column: index status (I: index dirty / space) - :Third column: presense of untracked files (U: untracked files / space) - :None: repository clean - - With file argument: returns status of this file. Output is - equivalent to the first two columns of "git status --porcelain" - (except for merge statuses as they are not supported by libgit2). 
- ''' - if path: - try: - status = self._repo().status_file(path) - except (KeyError, ValueError): - return None - - if status == git.GIT_STATUS_CURRENT: - return None - else: - if status & git.GIT_STATUS_WT_NEW: - return '??' - if status & git.GIT_STATUS_IGNORED: - return '!!' - - if status & git.GIT_STATUS_INDEX_NEW: - index_status = 'A' - elif status & git.GIT_STATUS_INDEX_DELETED: - index_status = 'D' - elif status & git.GIT_STATUS_INDEX_MODIFIED: - index_status = 'M' - else: - index_status = ' ' - - if status & git.GIT_STATUS_WT_DELETED: - wt_status = 'D' - elif status & git.GIT_STATUS_WT_MODIFIED: - wt_status = 'M' - else: - wt_status = ' ' - - return index_status + wt_status - else: - wt_column = ' ' - index_column = ' ' - untracked_column = ' ' - for status in self._repo().status().values(): - if status & git.GIT_STATUS_WT_NEW: - untracked_column = 'U' - continue - - if status & (git.GIT_STATUS_WT_DELETED - | git.GIT_STATUS_WT_MODIFIED): - wt_column = 'D' - - if status & (git.GIT_STATUS_INDEX_NEW - | git.GIT_STATUS_INDEX_MODIFIED - | git.GIT_STATUS_INDEX_DELETED): - index_column = 'I' - r = wt_column + index_column + untracked_column - return r if r != ' ' else None - - def branch(self): - try: - ref = self._repo().lookup_reference('HEAD') - except KeyError: - return None - - try: - target = ref.target - except ValueError: - return '[DETACHED HEAD]' - - if target.startswith('refs/heads/'): - return target[11:] - else: - return '[DETACHED HEAD]' -except ImportError: - from subprocess import Popen, PIPE - - def readlines(cmd, cwd): - p = Popen(cmd, shell=False, stdout=PIPE, stderr=PIPE, cwd=cwd) - p.stderr.close() - with p.stdout: - for line in p.stdout: - yield line[:-1].decode('utf-8') - - class Repository(object): - __slots__ = ('directory',) - - def __init__(self, directory): - self.directory = directory - - def _gitcmd(self, *args): - return readlines(('git',) + args, self.directory) - - def status(self, path=None): - if path: - try: - return next(self._gitcmd('status', '--porcelain', '--ignored', '--', path))[:2] - except StopIteration: - return None - else: - wt_column = ' ' - index_column = ' ' - untracked_column = ' ' - for line in self._gitcmd('status', '--porcelain'): - if line[0] == '?': - untracked_column = 'U' - continue - elif line[0] == '!': - continue - - if line[0] != ' ': - index_column = 'I' - - if line[1] != ' ': - wt_column = 'D' - - r = wt_column + index_column + untracked_column - return r if r != ' ' else None - - def branch(self): - for line in self._gitcmd('branch', '-l'): - if line[0] == '*': - return line[2:] - return None diff --git a/common/.local/lib/python2.7/site-packages/powerline/lib/vcs/mercurial.py b/common/.local/lib/python2.7/site-packages/powerline/lib/vcs/mercurial.py deleted file mode 100644 index 246bdec..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lib/vcs/mercurial.py +++ /dev/null @@ -1,51 +0,0 @@ -# vim:fileencoding=utf-8:noet -from __future__ import absolute_import -from mercurial import hg, ui, match - - -class Repository(object): - __slots__ = ('directory', 'ui') - - statuses = 'MARDUI' - repo_statuses = (1, 1, 1, 1, 2) - repo_statuses_str = (None, 'D ', ' U', 'DU') - - def __init__(self, directory): - self.directory = directory - self.ui = ui.ui() - - def _repo(self): - # Cannot create this object once and use always: when repository updates - # functions emit invalid results - return hg.repository(self.ui, self.directory) - - def status(self, path=None): - '''Return status of repository or file. 
- - Without file argument: returns status of the repository: - - :"D?": dirty (tracked modified files: added, removed, deleted, modified), - :"?U": untracked-dirty (added, but not tracked files) - :None: clean (status is empty) - - With file argument: returns status of this file: "M"odified, "A"dded, - "R"emoved, "D"eleted (removed from filesystem, but still tracked), - "U"nknown, "I"gnored, (None)Clean. - ''' - repo = self._repo() - if path: - m = match.match(None, None, [path], exact=True) - statuses = repo.status(match=m, unknown=True, ignored=True) - for status, paths in zip(self.statuses, statuses): - if paths: - return status - return None - else: - resulting_status = 0 - for status, paths in zip(self.repo_statuses, repo.status(unknown=True)): - if paths: - resulting_status |= status - return self.repo_statuses_str[resulting_status] - - def branch(self): - return self._repo().dirstate.branch() diff --git a/common/.local/lib/python2.7/site-packages/powerline/lint/__init__.py b/common/.local/lib/python2.7/site-packages/powerline/lint/__init__.py deleted file mode 100644 index 726f255..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lint/__init__.py +++ /dev/null @@ -1,1108 +0,0 @@ -from powerline.lint.markedjson import load -from powerline import find_config_file, Powerline -from powerline.lib.config import load_json_config -from powerline.lint.markedjson.error import echoerr, MarkedError -from powerline.segments.vim import vim_modes -from powerline.lint.inspect import getconfigargspec -from powerline.lint.markedjson.markedvalue import gen_marked_value -from powerline.lib.threaded import ThreadedSegment -import itertools -import sys -import os -import re -from collections import defaultdict -from copy import copy -import logging - - -try: - from __builtin__ import unicode -except ImportError: - unicode = str - - -def open_file(path): - return open(path, 'rb') - - -EMPTYTUPLE = tuple() - - -class JStr(unicode): - def join(self, iterable): - return super(JStr, self).join((unicode(item) for item in iterable)) - - -key_sep = JStr('/') -list_sep = JStr(', ') - - -def context_key(context): - return key_sep.join((c[0] for c in context)) - - -class DelayedEchoErr(object): - def __init__(self, echoerr): - self.echoerr = echoerr - self.errs = [] - - def __call__(self, *args, **kwargs): - self.errs.append((args, kwargs)) - - def echo_all(self): - for args, kwargs in self.errs: - self.echoerr(*args, **kwargs) - - def __nonzero__(self): - return not not self.errs - - -class Spec(object): - def __init__(self, **keys): - new_keys = {} - self.specs = list(keys.values()) - for k, v in keys.items(): - new_keys[k] = len(self.specs) - self.specs.append(v) - self.keys = new_keys - self.checks = [] - self.cmsg = '' - self.isoptional = False - self.uspecs = [] - self.ufailmsg = lambda key: 'found unknown key: {0}'.format(key) - if keys: - self.type(dict) - - def copy(self): - return self.__class__().update(self.__dict__) - - def update(self, d): - self.__dict__.update(d) - self.checks = copy(self.checks) - self.uspecs = copy(self.uspecs) - self.specs = [spec.copy() for spec in self.specs] - return self - - def unknown_spec(self, keyfunc, spec): - if isinstance(keyfunc, Spec): - self.specs.append(keyfunc) - keyfunc = len(self.specs) - 1 - self.specs.append(spec) - self.uspecs.append((keyfunc, len(self.specs) - 1)) - return self - - def unknown_msg(self, msgfunc): - self.ufailmsg = msgfunc - return self - - def context_message(self, msg): - self.cmsg = msg - for spec in self.specs: - if not 
spec.cmsg: - spec.context_message(msg) - return self - - def check_type(self, value, context_mark, data, context, echoerr, types): - if type(value.value) not in types: - echoerr(context=self.cmsg.format(key=context_key(context)), - context_mark=context_mark, - problem='{0!r} must be a {1} instance, not {2}'.format( - value, - list_sep.join((t.__name__ for t in types)), - type(value.value).__name__ - ), - problem_mark=value.mark) - return False, True - return True, False - - def check_func(self, value, context_mark, data, context, echoerr, func, msg_func): - proceed, echo, hadproblem = func(value, data, context, echoerr) - if echo and hadproblem: - echoerr(context=self.cmsg.format(key=context_key(context)), - context_mark=context_mark, - problem=msg_func(value), - problem_mark=value.mark) - return proceed, hadproblem - - def check_list(self, value, context_mark, data, context, echoerr, item_func, msg_func): - i = 0 - hadproblem = False - for item in value: - if isinstance(item_func, int): - spec = self.specs[item_func] - proceed, fhadproblem = spec.match(item, value.mark, data, context + (('list item ' + unicode(i), item),), echoerr) - else: - proceed, echo, fhadproblem = item_func(item, data, context, echoerr) - if echo and fhadproblem: - echoerr(context=self.cmsg.format(key=context_key(context) + '/list item ' + unicode(i)), - context_mark=value.mark, - problem=msg_func(item), - problem_mark=item.mark) - if fhadproblem: - hadproblem = True - if not proceed: - return proceed, hadproblem - i += 1 - return True, hadproblem - - def check_either(self, value, context_mark, data, context, echoerr, start, end): - new_echoerr = DelayedEchoErr(echoerr) - - hadproblem = False - for spec in self.specs[start:end]: - proceed, hadproblem = spec.match(value, value.mark, data, context, new_echoerr) - if not proceed: - break - if not hadproblem: - return True, False - - new_echoerr.echo_all() - - return False, hadproblem - - def check_tuple(self, value, context_mark, data, context, echoerr, start, end): - hadproblem = False - for (i, item, spec) in zip(itertools.count(), value, self.specs[start:end]): - proceed, ihadproblem = spec.match(item, value.mark, data, context + (('tuple item ' + unicode(i), item),), echoerr) - if ihadproblem: - hadproblem = True - if not proceed: - return False, hadproblem - return True, hadproblem - - def type(self, *args): - self.checks.append(('check_type', args)) - return self - - cmp_funcs = { - 'le': lambda x, y: x <= y, - 'lt': lambda x, y: x < y, - 'ge': lambda x, y: x >= y, - 'gt': lambda x, y: x > y, - 'eq': lambda x, y: x == y, - } - - cmp_msgs = { - 'le': 'lesser or equal to', - 'lt': 'lesser then', - 'ge': 'greater or equal to', - 'gt': 'greater then', - 'eq': 'equal to', - } - - def len(self, comparison, cint, msg_func=None): - cmp_func = self.cmp_funcs[comparison] - msg_func = msg_func or (lambda value: 'length of {0!r} is not {1} {2}'.format(value, self.cmp_msgs[comparison], cint)) - self.checks.append(('check_func', - (lambda value, *args: (True, True, not cmp_func(len(value), cint))), - msg_func)) - return self - - def cmp(self, comparison, cint, msg_func=None): - if type(cint) is str: - self.type(unicode) - elif type(cint) is float: - self.type(int, float) - else: - self.type(type(cint)) - cmp_func = self.cmp_funcs[comparison] - msg_func = msg_func or (lambda value: '{0} is not {1} {2}'.format(value, self.cmp_msgs[comparison], cint)) - self.checks.append(('check_func', - (lambda value, *args: (True, True, not cmp_func(value.value, cint))), - msg_func)) - return 
self - - def unsigned(self, msg_func=None): - self.type(int) - self.checks.append(('check_func', - (lambda value, *args: (True, True, value < 0)), - lambda value: '{0} must be greater then zero'.format(value))) - return self - - def list(self, item_func, msg_func=None): - self.type(list) - if isinstance(item_func, Spec): - self.specs.append(item_func) - item_func = len(self.specs) - 1 - self.checks.append(('check_list', item_func, msg_func or (lambda item: 'failed check'))) - return self - - def tuple(self, *specs): - self.type(list) - - max_len = len(specs) - min_len = max_len - for spec in reversed(specs): - if spec.isoptional: - min_len -= 1 - else: - break - if max_len == min_len: - self.len('eq', len(specs)) - else: - self.len('ge', min_len) - self.len('le', max_len) - - start = len(self.specs) - for i, spec in zip(itertools.count(), specs): - self.specs.append(spec) - self.checks.append(('check_tuple', start, len(self.specs))) - return self - - def func(self, func, msg_func=None): - self.checks.append(('check_func', func, msg_func or (lambda value: 'failed check'))) - return self - - def re(self, regex, msg_func=None): - self.type(unicode) - compiled = re.compile(regex) - msg_func = msg_func or (lambda value: 'String "{0}" does not match "{1}"'.format(value, regex)) - self.checks.append(('check_func', - (lambda value, *args: (True, True, not compiled.match(value.value))), - msg_func)) - return self - - def ident(self, msg_func=None): - msg_func = msg_func or (lambda value: 'String "{0}" is not an alphanumeric/underscore identifier'.format(value)) - return self.re('^\w+$', msg_func) - - def oneof(self, collection, msg_func=None): - msg_func = msg_func or (lambda value: '"{0}" must be one of {1!r}'.format(value, list(collection))) - self.checks.append(('check_func', - lambda value, *args: (True, True, value not in collection), - msg_func)) - return self - - def error(self, msg): - self.checks.append(('check_func', lambda *args: (True, True, True), - lambda value: msg.format(value))) - return self - - def either(self, *specs): - start = len(self.specs) - self.specs.extend(specs) - self.checks.append(('check_either', start, len(self.specs))) - return self - - def optional(self): - self.isoptional = True - return self - - def match_checks(self, *args): - hadproblem = False - for check in self.checks: - proceed, chadproblem = getattr(self, check[0])(*(args + check[1:])) - if chadproblem: - hadproblem = True - if not proceed: - return False, hadproblem - return True, hadproblem - - def match(self, value, context_mark=None, data=None, context=EMPTYTUPLE, echoerr=echoerr): - proceed, hadproblem = self.match_checks(value, context_mark, data, context, echoerr) - if proceed: - if self.keys or self.uspecs: - for key, vali in self.keys.items(): - valspec = self.specs[vali] - if key in value: - proceed, mhadproblem = valspec.match(value[key], value.mark, data, context + ((key, value[key]),), echoerr) - if mhadproblem: - hadproblem = True - if not proceed: - return False, hadproblem - else: - if not valspec.isoptional: - hadproblem = True - echoerr(context=self.cmsg.format(key=context_key(context)), - context_mark=None, - problem='required key is missing: {0}'.format(key), - problem_mark=value.mark) - for key in value.keys(): - if key not in self.keys: - for keyfunc, vali in self.uspecs: - valspec = self.specs[vali] - if isinstance(keyfunc, int): - spec = self.specs[keyfunc] - proceed, khadproblem = spec.match(key, context_mark, data, context, echoerr) - else: - proceed, khadproblem = keyfunc(key, 
data, context, echoerr) - if khadproblem: - hadproblem = True - if proceed: - valspec.match(value[key], value.mark, data, context + ((key, value[key]),), echoerr) - break - else: - hadproblem = True - if self.ufailmsg: - echoerr(context=self.cmsg.format(key=context_key(context)), - context_mark=None, - problem=self.ufailmsg(key), - problem_mark=key.mark) - return True, hadproblem - - -class WithPath(object): - def __init__(self, import_paths): - self.import_paths = import_paths - - def __enter__(self): - self.oldpath = sys.path - sys.path = self.import_paths + sys.path - - def __exit__(self, *args): - sys.path = self.oldpath - - -def check_matcher_func(ext, match_name, data, context, echoerr): - import_paths = [os.path.expanduser(path) for path in context[0][1].get('common', {}).get('paths', [])] - - match_module, separator, match_function = match_name.rpartition('.') - if not separator: - match_module = 'powerline.matchers.{0}'.format(ext) - match_function = match_name - with WithPath(import_paths): - try: - func = getattr(__import__(match_module, fromlist=[match_function]), unicode(match_function)) - except ImportError: - echoerr(context='Error while loading matcher functions', - problem='failed to load module {0}'.format(match_module), - problem_mark=match_name.mark) - return True, True - except AttributeError: - echoerr(context='Error while loading matcher functions', - problem='failed to load matcher function {0}'.format(match_function), - problem_mark=match_name.mark) - return True, True - - if not callable(func): - echoerr(context='Error while loading matcher functions', - problem='loaded "function" {0} is not callable'.format(match_function), - problem_mark=match_name.mark) - return True, True - - if hasattr(func, 'func_code') and hasattr(func.func_code, 'co_argcount'): - if func.func_code.co_argcount != 1: - echoerr(context='Error while loading matcher functions', - problem='function {0} accepts {1} arguments instead of 1. 
Are you sure it is the proper function?'.format(match_function, func.func_code.co_argcount), - problem_mark=match_name.mark) - - return True, False - - -def check_ext(ext, data, context, echoerr): - hadsomedirs = False - hadproblem = False - for subdir in ('themes', 'colorschemes'): - if ext not in data['configs'][subdir]: - hadproblem = True - echoerr(context='Error while loading {0} extension configuration'.format(ext), - context_mark=ext.mark, - problem='{0} configuration does not exist'.format(subdir)) - else: - hadsomedirs = True - return hadsomedirs, hadproblem - - -def check_config(d, theme, data, context, echoerr): - if len(context) == 4: - ext = context[-2][0] - else: - # local_themes - ext = context[-3][0] - if ext not in data['configs'][d] or theme not in data['configs'][d][ext]: - echoerr(context='Error while loading {0} from {1} extension configuration'.format(d[:-1], ext), - problem='failed to find configuration file {0}/{1}/{2}.json'.format(d, ext, theme), - problem_mark=theme.mark) - return True, False, True - return True, False, False - - -divider_spec = Spec().type(unicode).len('le', 3, - lambda value: 'Divider {0!r} is too large!'.format(value)).copy -divside_spec = Spec( - hard=divider_spec(), - soft=divider_spec(), -).copy -colorscheme_spec = Spec().type(unicode).func(lambda *args: check_config('colorschemes', *args)).copy -theme_spec = Spec().type(unicode).func(lambda *args: check_config('themes', *args)).copy -main_spec = (Spec( - common=Spec( - dividers=Spec( - left=divside_spec(), - right=divside_spec(), - ), - spaces=Spec().unsigned().cmp('le', 2, - lambda value: 'Are you sure you need such a big ({0}) number of spaces?'.format(value)), - term_truecolor=Spec().type(bool).optional(), - # Python is capable of loading from zip archives. 
Thus checking path - # only for existence of the path, not for it being a directory - paths=Spec().list((lambda value, *args: (True, True, not os.path.exists(value.value))), - lambda value: 'path does not exist: {0}'.format(value)).optional(), - log_file=Spec().type(str).func(lambda value, *args: (True, True, not os.path.isdir(os.path.dirname(value))), - lambda value: 'directory does not exist: {0}'.format(os.path.dirname(value))).optional(), - log_level=Spec().re('^[A-Z]+$').func(lambda value, *args: (True, True, not hasattr(logging, value)), - lambda value: 'unknown debugging level {0}'.format(value)).optional(), - log_format=Spec().type(str).optional(), - interval=Spec().either(Spec().cmp('gt', 0.0), Spec().type(type(None))).optional(), - reload_config=Spec().type(bool).optional(), - ).context_message('Error while loading common configuration (key {key})'), - ext=Spec( - vim=Spec( - colorscheme=colorscheme_spec(), - theme=theme_spec(), - local_themes=Spec() - .unknown_spec(lambda *args: check_matcher_func('vim', *args), theme_spec()) - ).optional(), - ipython=Spec( - colorscheme=colorscheme_spec(), - theme=theme_spec(), - local_themes=Spec( - in2=theme_spec(), - out=theme_spec(), - rewrite=theme_spec(), - ), - ).optional(), - ).unknown_spec(check_ext, - Spec( - colorscheme=colorscheme_spec(), - theme=theme_spec(), - )) - .context_message('Error while loading extensions configuration (key {key})'), -).context_message('Error while loading main configuration')) - -term_color_spec = Spec().unsigned().cmp('le', 255).copy -true_color_spec = Spec().re('^[0-9a-fA-F]{6}$', - lambda value: '"{0}" is not a six-digit hexadecimal unsigned integer written as a string'.format(value)).copy -colors_spec = (Spec( - colors=Spec().unknown_spec( - Spec().ident(), - Spec().either( - Spec().tuple(term_color_spec(), true_color_spec()), - term_color_spec())) - .context_message('Error while checking colors (key {key})'), - gradients=Spec().unknown_spec( - Spec().ident(), - Spec().tuple( - Spec().len('gt', 1).list(term_color_spec()), - Spec().len('gt', 1).list(true_color_spec()).optional(), - ) - ).context_message('Error while checking gradients (key {key})'), -).context_message('Error while loading colors configuration')) - - -def check_color(color, data, context, echoerr): - if color not in data['colors_config'].get('colors', {}) and color not in data['colors_config'].get('gradients', {}): - echoerr(context='Error while checking highlight group in colorscheme (key {key})'.format(key=context_key(context)), - problem='found unexistent color or gradient {0}'.format(color), - problem_mark=color.mark) - return True, False, True - return True, False, False - - -def check_translated_group_name(group, data, context, echoerr): - if group not in context[0][1].get('groups', {}): - echoerr(context='Error while checking translated group in colorscheme (key {key})'.format(key=context_key(context)), - problem='translated group {0} is not in main groups dictionary'.format(group), - problem_mark=group.mark) - return True, False, True - return True, False, False - - -color_spec = Spec().type(unicode).func(check_color).copy -name_spec = Spec().type(unicode).len('gt', 0).optional().copy -group_spec = Spec( - fg=color_spec(), - bg=color_spec(), - attr=Spec().list(Spec().type(unicode).oneof(set(('bold', 'italic', 'underline')))).optional(), -).copy -group_name_spec = Spec().re('^\w+(?::\w+)?$').copy -groups_spec = Spec().unknown_spec( - group_name_spec(), - group_spec(), -).copy -colorscheme_spec = (Spec( - name=name_spec(), - 
groups=groups_spec().context_message('Error while loading groups (key {key})'), -).context_message('Error while loading coloscheme')) -vim_mode_spec = Spec().oneof(set(list(vim_modes) + ['nc'])).copy -vim_colorscheme_spec = (Spec( - name=name_spec(), - groups=groups_spec().context_message('Error while loading groups (key {key})'), - mode_translations=Spec().unknown_spec( - vim_mode_spec(), - Spec( - colors=Spec().unknown_spec( - color_spec(), - color_spec(), - ).optional(), - groups=Spec().unknown_spec( - group_name_spec().func(check_translated_group_name), - group_spec(), - ).optional(), - ), - ).context_message('Error while loading mode translations (key {key})'), -).context_message('Error while loading vim colorscheme')) - - -generic_keys = set(('exclude_modes', 'include_modes', 'width', 'align', 'name', 'draw_soft_divider', 'draw_hard_divider', 'priority', 'after', 'before')) -type_keys = { - 'function': set(('args', 'module', 'draw_inner_divider')), - 'string': set(('contents', 'type', 'highlight_group', 'divider_highlight_group')), - 'filler': set(('type', 'highlight_group', 'divider_highlight_group')), - } -required_keys = { - 'function': set(), - 'string': set(('contents',)), - 'filler': set(), - } -function_keys = set(('args', 'module')) -highlight_keys = set(('highlight_group', 'name')) - - -def check_key_compatibility(segment, data, context, echoerr): - segment_type = segment.get('type', 'function') - - if segment_type not in type_keys: - echoerr(context='Error while checking segments (key {key})'.format(key=context_key(context)), - problem='found segment with unknown type {0}'.format(segment_type), - problem_mark=segment_type.mark) - return False, False, True - - hadproblem = False - - keys = set(segment) - if not ((keys - generic_keys) < type_keys[segment_type]): - unknown_keys = keys - generic_keys - type_keys[segment_type] - echoerr(context='Error while checking segments (key {key})'.format(key=context_key(context)), - context_mark=context[-1][1].mark, - problem='found keys not used with the current segment type: {0}'.format( - list_sep.join(unknown_keys)), - problem_mark=list(unknown_keys)[0].mark) - hadproblem = True - - if not (keys > required_keys[segment_type]): - missing_keys = required_keys[segment_type] - keys - echoerr(context='Error while checking segments (key {key})'.format(key=context_key(context)), - context_mark=context[-1][1].mark, - problem='found missing required keys: {0}'.format( - list_sep.join(missing_keys))) - hadproblem = True - - if not (segment_type == 'function' or (keys & highlight_keys)): - echoerr(context='Error while checking segments (key {key})'.format(key=context_key(context)), - context_mark=context[-1][1].mark, - problem='found missing keys required to determine highlight group. 
Either highlight_group or name key must be present') - hadproblem = True - - return True, False, hadproblem - - -def check_segment_module(module, data, context, echoerr): - with WithPath(data['import_paths']): - try: - __import__(unicode(module)) - except ImportError: - echoerr(context='Error while checking segments (key {key})'.format(key=context_key(context)), - problem='failed to import module {0}'.format(module), - problem_mark=module.mark) - return True, False, True - return True, False, False - - -def check_full_segment_data(segment, data, context, echoerr): - if 'name' not in segment: - return True, False, False - - ext = data['ext'] - theme_segment_data = context[0][1].get('segment_data', {}) - top_theme_name = data['main_config'].get('ext', {}).get(ext, {}).get('theme', None) - if not top_theme_name or data['theme'] == top_theme_name: - top_segment_data = {} - else: - top_segment_data = data['ext_theme_configs'].get(top_theme_name, {}).get('segment_data', {}) - - names = [segment['name']] - if segment.get('type', 'function') == 'function': - module = segment.get('module', context[0][1].get('default_module', 'powerline.segments.' + ext)) - names.insert(0, unicode(module) + '.' + unicode(names[0])) - - segment_copy = segment.copy() - - for key in ('before', 'after', 'args', 'contents'): - if key not in segment_copy: - for segment_data in [theme_segment_data, top_segment_data]: - for name in names: - try: - val = segment_data[name][key] - # HACK to keep marks - l = list(segment_data[name]) - k = l[l.index(key)] - segment_copy[k] = val - except KeyError: - pass - - return check_key_compatibility(segment_copy, data, context, echoerr) - - -def import_segment(name, data, context, echoerr, module=None): - if not module: - module = context[-2][1].get('module', context[0][1].get('default_module', 'powerline.segments.' 
+ data['ext'])) - - with WithPath(data['import_paths']): - try: - func = getattr(__import__(unicode(module), fromlist=[unicode(name)]), unicode(name)) - except ImportError: - echoerr(context='Error while checking segments (key {key})'.format(key=context_key(context)), - problem='failed to import module {0}'.format(module), - problem_mark=module.mark) - return None - except AttributeError: - echoerr(context='Error while loading segment function (key {key})'.format(key=context_key(context)), - problem='failed to load function {0} from module {1}'.format(name, module), - problem_mark=name.mark) - return None - - if not callable(func): - echoerr(context='Error while checking segments (key {key})'.format(key=context_key(context)), - problem='imported "function" {0} from module {1} is not callable'.format(name, module), - problem_mark=module.mark) - return None - - return func - - -def check_segment_name(name, data, context, echoerr): - ext = data['ext'] - if context[-2][1].get('type', 'function') == 'function': - func = import_segment(name, data, context, echoerr) - - if not func: - return True, False, True - - hl_groups = [] - divider_hl_group = None - - if func.__doc__: - H_G_USED_STR = 'Highlight groups used: ' - D_H_G_USED_STR = 'Divider highlight group used: ' - for line in func.__doc__.split('\n'): - if H_G_USED_STR in line: - hl_groups.append(line[line.index(H_G_USED_STR) + len(H_G_USED_STR):]) - elif D_H_G_USED_STR in line: - divider_hl_group = line[line.index(D_H_G_USED_STR) + len(D_H_G_USED_STR) + 2:-3] - - hadproblem = False - - if divider_hl_group: - r = hl_exists(divider_hl_group, data, context, echoerr, allow_gradients=True) - if r: - echoerr(context='Error while checking theme (key {key})'.format(key=context_key(context)), - problem='found highlight group {0} not defined in the following colorschemes: {1}\n(Group name was obtained from function documentation.)'.format( - divider_hl_group, list_sep.join(r)), - problem_mark=name.mark) - hadproblem = True - - if hl_groups: - greg = re.compile(r'``([^`]+)``( \(gradient\))?') - hl_groups = [[greg.match(subs).groups() for subs in s.split(' or ')] for s in (list_sep.join(hl_groups)).split(', ')] - for required_pack in hl_groups: - rs = [hl_exists(hl_group, data, context, echoerr, allow_gradients=('force' if gradient else False)) - for hl_group, gradient in required_pack] - if all(rs): - echoerr(context='Error while checking theme (key {key})'.format(key=context_key(context)), - problem='found highlight groups list ({0}) with all groups not defined in some colorschemes\n(Group names were taken from function documentation.)'.format( - list_sep.join((h[0] for h in required_pack))), - problem_mark=name.mark) - for r, h in zip(rs, required_pack): - echoerr(context='Error while checking theme (key {key})'.format(key=context_key(context)), - problem='found highlight group {0} not defined in the following colorschemes: {1}'.format( - h[0], list_sep.join(r))) - hadproblem = True - else: - r = hl_exists(name, data, context, echoerr, allow_gradients=True) - if r: - echoerr(context='Error while checking theme (key {key})'.format(key=context_key(context)), - problem='found highlight group {0} not defined in the following colorschemes: {1}\n(If not specified otherwise in documentation, highlight group for function segments\nis the same as the function name.)'.format( - name, list_sep.join(r)), - problem_mark=name.mark) - hadproblem = True - - return True, False, hadproblem - else: - if name not in context[0][1].get('segment_data', {}): - 
top_theme_name = data['main_config'].get('ext', {}).get(ext, {}).get('theme', None) - if data['theme'] == top_theme_name: - top_theme = {} - else: - top_theme = data['ext_theme_configs'].get(top_theme_name, {}) - if name not in top_theme.get('segment_data', {}): - echoerr(context='Error while checking segments (key {key})'.format(key=context_key(context)), - problem='found useless use of name key (such name is not present in theme/segment_data)', - problem_mark=name.mark) - - return True, False, False - - -def hl_exists(hl_group, data, context, echoerr, allow_gradients=False): - ext = data['ext'] - if ext not in data['colorscheme_configs']: - # No colorschemes. Error was already reported, no need to report it - # twice - return [] - r = [] - for colorscheme, cconfig in data['colorscheme_configs'][ext].items(): - if hl_group not in cconfig.get('groups', {}): - r.append(colorscheme) - elif not allow_gradients or allow_gradients == 'force': - group_config = cconfig['groups'][hl_group] - hadgradient = False - for ckey in ('fg', 'bg'): - color = group_config.get(ckey) - if not color: - # No color. Error was already reported. - continue - # Gradients are only allowed for function segments. Note that - # whether *either* color or gradient exists should have been - # already checked - hascolor = color in data['colors_config'].get('colors', {}) - hasgradient = color in data['colors_config'].get('gradients', {}) - if hasgradient: - hadgradient = True - if allow_gradients is False and not hascolor and hasgradient: - echoerr(context='Error while checking highlight group in theme (key {key})'.format(key=context_key(context)), - context_mark=getattr(hl_group, 'mark', None), - problem='group {0} is using gradient {1} instead of a color'.format(hl_group, color), - problem_mark=color.mark) - r.append(colorscheme) - continue - if allow_gradients == 'force' and not hadgradient: - echoerr(context='Error while checking highlight group in theme (key {key})'.format(key=context_key(context)), - context_mark=getattr(hl_group, 'mark', None), - problem='group {0} should have at least one gradient color, but it has no'.format(hl_group), - problem_mark=group_config.mark) - r.append(colorscheme) - return r - - -def check_highlight_group(hl_group, data, context, echoerr): - r = hl_exists(hl_group, data, context, echoerr) - if r: - echoerr(context='Error while checking theme (key {key})'.format(key=context_key(context)), - problem='found highlight group {0} not defined in the following colorschemes: {1}'.format( - hl_group, list_sep.join(r)), - problem_mark=hl_group.mark) - return True, False, True - return True, False, False - - -def check_highlight_groups(hl_groups, data, context, echoerr): - rs = [hl_exists(hl_group, data, context, echoerr) for hl_group in hl_groups] - if all(rs): - echoerr(context='Error while checking theme (key {key})'.format(key=context_key(context)), - problem='found highlight groups list ({0}) with all groups not defined in some colorschemes'.format( - list_sep.join((unicode(h) for h in hl_groups))), - problem_mark=hl_groups.mark) - for r, hl_group in zip(rs, hl_groups): - echoerr(context='Error while checking theme (key {key})'.format(key=context_key(context)), - problem='found highlight group {0} not defined in the following colorschemes: {1}'.format( - hl_group, list_sep.join(r)), - problem_mark=hl_group.mark) - return True, False, True - return True, False, False - - -def check_segment_data_key(key, data, context, echoerr): - ext = data['ext'] - top_theme_name = 
data['main_config'].get('ext', {}).get(ext, {}).get('theme', None) - is_top_theme = (data['theme'] == top_theme_name) - if is_top_theme: - themes = data['ext_theme_configs'].values() - else: - themes = [context[0][1]] - - for theme in themes: - for segments in theme.get('segments', {}).values(): - found = False - for segment in segments: - if 'name' in segment: - if key == segment['name']: - found = True - module = segment.get('module', theme.get('default_module', 'powerline.segments.' + ext)) - if key == unicode(module) + '.' + unicode(segment['name']): - found = True - if found: - break - if found: - break - else: - echoerr(context='Error while checking segment data', - problem='found key {0} that cannot be associated with any segment'.format(key), - problem_mark=key.mark) - return True, False, True - - return True, False, False - - -threaded_args_specs = { - 'interval': Spec().cmp('gt', 0.0), - 'update_first': Spec().type(bool), - 'shutdown_event': Spec().error('Shutdown event must be set by powerline'), -} - - -def check_args_variant(segment, args, data, context, echoerr): - argspec = getconfigargspec(segment) - present_args = set(args) - all_args = set(argspec.args) - required_args = set(argspec.args[:-len(argspec.defaults)]) - - hadproblem = False - - if required_args - present_args: - echoerr(context='Error while checking segment arguments (key {key})'.format(key=context_key(context)), - context_mark=args.mark, - problem='some of the required keys are missing: {0}'.format(list_sep.join(required_args - present_args))) - hadproblem = True - - if not all_args >= present_args: - echoerr(context='Error while checking segment arguments (key {key})'.format(key=context_key(context)), - context_mark=args.mark, - problem='found unknown keys: {0}'.format(list_sep.join(present_args - all_args)), - problem_mark=next(iter(present_args - all_args)).mark) - hadproblem = True - - if isinstance(segment, ThreadedSegment): - for key in set(threaded_args_specs) & present_args: - proceed, khadproblem = threaded_args_specs[key].match(args[key], args.mark, data, context + ((key, args[key]),), echoerr) - if khadproblem: - hadproblem = True - if not proceed: - return hadproblem - - return hadproblem - - -def check_args(get_segment_variants, args, data, context, echoerr): - new_echoerr = DelayedEchoErr(echoerr) - count = 0 - hadproblem = False - for segment in get_segment_variants(data, context, new_echoerr): - count += 1 - shadproblem = check_args_variant(segment, args, data, context, echoerr) - if shadproblem: - hadproblem = True - - if not count: - hadproblem = True - if new_echoerr: - new_echoerr.echo_all() - else: - echoerr(context='Error while checking segment arguments (key {key})'.format(key=context_key(context)), - context_mark=context[-2][1].mark, - problem='no suitable segments found') - - return True, False, hadproblem - - -def get_one_segment_variant(data, context, echoerr): - name = context[-2][1].get('name') - if name: - func = import_segment(name, data, context, echoerr) - if func: - yield func - - -def get_all_possible_segments(data, context, echoerr): - name = context[-2][0] - module, name = name.rpartition('.')[::2] - if module: - func = import_segment(name, data, context, echoerr, module=module) - if func: - yield func - else: - for theme_config in data['ext_theme_configs'].values(): - for segments in theme_config.get('segments', {}).values(): - for segment in segments: - if segment.get('type', 'function') == 'function': - module = segment.get('module', context[0][1].get('default_module', 
'powerline.segments.' + data['ext'])) - func = import_segment(name, data, context, echoerr, module=module) - if func: - yield func - - -args_spec = Spec( - pl=Spec().error('pl object must be set by powerline').optional(), - segment_info=Spec().error('Segment info dictionary must be set by powerline').optional(), -).unknown_spec(Spec(), Spec()).optional().copy -highlight_group_spec = Spec().type(unicode).copy -segment_module_spec = Spec().type(unicode).func(check_segment_module).optional().copy -segments_spec = Spec().optional().list( - Spec( - type=Spec().oneof(type_keys).optional(), - name=Spec().re('^[a-zA-Z_]\w+$').func(check_segment_name).optional(), - exclude_modes=Spec().list(vim_mode_spec()).optional(), - include_modes=Spec().list(vim_mode_spec()).optional(), - draw_hard_divider=Spec().type(bool).optional(), - draw_soft_divider=Spec().type(bool).optional(), - draw_inner_divider=Spec().type(bool).optional(), - module=segment_module_spec(), - priority=Spec().type(int, float, type(None)).optional(), - after=Spec().type(unicode).optional(), - before=Spec().type(unicode).optional(), - width=Spec().either(Spec().unsigned(), Spec().cmp('eq', 'auto')).optional(), - align=Spec().oneof(set('lr')).optional(), - args=args_spec().func(lambda *args, **kwargs: check_args(get_one_segment_variant, *args, **kwargs)), - contents=Spec().type(unicode).optional(), - highlight_group=Spec().list( - highlight_group_spec().re('^(?:(?!:divider$).)+$', - lambda value: 'it is recommended that only divider highlight group names end with ":divider"') - ).func(check_highlight_groups).optional(), - divider_highlight_group=highlight_group_spec().func(check_highlight_group).re(':divider$', - lambda value: 'it is recommended that divider highlight group names end with ":divider"').optional(), - ).func(check_full_segment_data), -).copy -theme_spec = (Spec( - default_module=segment_module_spec(), - segment_data=Spec().unknown_spec( - Spec().func(check_segment_data_key), - Spec( - after=Spec().type(unicode).optional(), - before=Spec().type(unicode).optional(), - args=args_spec().func(lambda *args, **kwargs: check_args(get_all_possible_segments, *args, **kwargs)), - contents=Spec().type(unicode).optional(), - ), - ).optional().context_message('Error while loading segment data (key {key})'), - segments=Spec( - left=segments_spec().context_message('Error while loading segments from left side (key {key})'), - right=segments_spec().context_message('Error while loading segments from right side (key {key})'), - ).func( - lambda value, *args: (True, True, not (('left' in value) or ('right' in value))), - lambda value: 'segments dictionary must contain either left, right or both keys' - ).context_message('Error while loading segments (key {key})'), -).context_message('Error while loading theme')) - - -def check(path=None): - search_paths = [path] if path else Powerline.get_config_paths() - - dirs = { - 'themes': defaultdict(lambda: []), - 'colorschemes': defaultdict(lambda: []) - } - for path in reversed(search_paths): - for subdir in ('themes', 'colorschemes'): - d = os.path.join(path, subdir) - if os.path.isdir(d): - for ext in os.listdir(d): - extd = os.path.join(d, ext) - if os.path.isdir(extd): - dirs[subdir][ext].append(extd) - elif os.path.exists(d): - hadproblem = True - sys.stderr.write('Path {0} is supposed to be a directory, but it is not\n'.format(d)) - - configs = { - 'themes': defaultdict(lambda: {}), - 'colorschemes': defaultdict(lambda: {}) - } - for subdir in ('themes', 'colorschemes'): - for ext in dirs[subdir]: 
- for d in dirs[subdir][ext]: - for config in os.listdir(d): - if config.endswith('.json'): - configs[subdir][ext][config[:-5]] = os.path.join(d, config) - - diff = set(configs['themes']) ^ set(configs['colorschemes']) - if diff: - hadproblem = True - for ext in diff: - sys.stderr.write('{0} extension {1} present only in {2}\n'.format( - ext, - 'configuration' if (ext in dirs['themes'] and ext in dirs['colorschemes']) else 'directory', - 'themes' if ext in configs['themes'] else 'colorschemes', - )) - - lhadproblem = [False] - - def load_config(stream): - r, hadproblem = load(stream) - if hadproblem: - lhadproblem[0] = True - return r - - hadproblem = False - try: - main_config = load_json_config(find_config_file(search_paths, 'config'), load=load_config, open_file=open_file) - except IOError: - main_config = {} - sys.stderr.write('\nConfiguration file not found: config.json\n') - hadproblem = True - except MarkedError as e: - main_config = {} - sys.stderr.write(str(e) + '\n') - hadproblem = True - else: - if main_spec.match(main_config, data={'configs': configs}, context=(('', main_config),))[1]: - hadproblem = True - - import_paths = [os.path.expanduser(path) for path in main_config.get('common', {}).get('paths', [])] - - try: - colors_config = load_json_config(find_config_file(search_paths, 'colors'), load=load_config, open_file=open_file) - except IOError: - colors_config = {} - sys.stderr.write('\nConfiguration file not found: colors.json\n') - hadproblem = True - except MarkedError as e: - colors_config = {} - sys.stderr.write(str(e) + '\n') - hadproblem = True - else: - if colors_spec.match(colors_config, context=(('', colors_config),))[1]: - hadproblem = True - - if lhadproblem[0]: - hadproblem = True - - colorscheme_configs = defaultdict(lambda: {}) - for ext in configs['colorschemes']: - data = {'ext': ext, 'colors_config': colors_config} - for colorscheme, cfile in configs['colorschemes'][ext].items(): - with open_file(cfile) as config_file_fp: - try: - config, lhadproblem = load(config_file_fp) - except MarkedError as e: - sys.stderr.write(str(e) + '\n') - hadproblem = True - continue - if lhadproblem: - hadproblem = True - colorscheme_configs[ext][colorscheme] = config - if ext == 'vim': - spec = vim_colorscheme_spec - else: - spec = colorscheme_spec - if spec.match(config, context=(('', config),), data=data)[1]: - hadproblem = True - - theme_configs = defaultdict(lambda: {}) - for ext in configs['themes']: - for theme, sfile in configs['themes'][ext].items(): - with open_file(sfile) as config_file_fp: - try: - config, lhadproblem = load(config_file_fp) - except MarkedError as e: - sys.stderr.write(str(e) + '\n') - hadproblem = True - continue - if lhadproblem: - hadproblem = True - theme_configs[ext][theme] = config - for ext, configs in theme_configs.items(): - data = {'ext': ext, 'colorscheme_configs': colorscheme_configs, 'import_paths': import_paths, - 'main_config': main_config, 'ext_theme_configs': configs, 'colors_config': colors_config} - for theme, config in configs.items(): - data['theme'] = theme - if theme_spec.match(config, context=(('', config),), data=data)[1]: - hadproblem = True - return hadproblem diff --git a/common/.local/lib/python2.7/site-packages/powerline/lint/inspect.py b/common/.local/lib/python2.7/site-packages/powerline/lint/inspect.py deleted file mode 100644 index b0f0c7a..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lint/inspect.py +++ /dev/null @@ -1,59 +0,0 @@ -# vim:fileencoding=utf-8:noet -from __future__ import 
absolute_import -from inspect import ArgSpec, getargspec -from powerline.lib.threaded import ThreadedSegment, KwThreadedSegment -from itertools import count - -def getconfigargspec(obj): - if isinstance(obj, ThreadedSegment): - args = ['interval'] - defaults = [getattr(obj, 'interval', 1)] - if obj.update_first: - args.append('update_first') - defaults.append(True) - methods = ['render', 'set_state'] - if isinstance(obj, KwThreadedSegment): - methods += ['key', 'render_one'] - - for method in methods: - if hasattr(obj, method): - # Note: on = i: - default = argspec.defaults[-i] - defaults.append(default) - args.append(arg) - else: - args.insert(0, arg) - argspec = ArgSpec(args=args, varargs=None, keywords=None, defaults=tuple(defaults)) - else: - if hasattr(obj, 'powerline_origin'): - obj = obj.powerline_origin - else: - obj = obj - - argspec = getargspec(obj) - args = [] - defaults = [] - for i, arg in zip(count(1), reversed(argspec.args)): - if ((arg == 'segment_info' and getattr(obj, 'powerline_requires_segment_info', None)) or - arg == 'pl'): - continue - if argspec.defaults and len(argspec.defaults) >= i: - default = argspec.defaults[-i] - defaults.append(default) - args.append(arg) - else: - args.insert(0, arg) - argspec = ArgSpec(args=args, varargs=argspec.varargs, keywords=argspec.keywords, defaults=tuple(defaults)) - - return argspec diff --git a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/__init__.py b/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/__init__.py deleted file mode 100644 index f8ef748..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -__version__ = '3.10' - - -from .loader import Loader - - -def load(stream, Loader=Loader): - """ - Parse the first YAML document in a stream - and produce the corresponding Python object. - """ - loader = Loader(stream) - try: - r = loader.get_single_data() - return r, loader.haserrors - finally: - loader.dispose() diff --git a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/composer.py b/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/composer.py deleted file mode 100644 index 303e6f2..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/composer.py +++ /dev/null @@ -1,117 +0,0 @@ -__all__ = ['Composer', 'ComposerError'] - -from .error import MarkedError -from .events import * # NOQA -from .nodes import * # NOQA - - -class ComposerError(MarkedError): - pass - - -class Composer: - def __init__(self): - pass - - def check_node(self): - # Drop the STREAM-START event. - if self.check_event(StreamStartEvent): - self.get_event() - - # If there are more documents available? - return not self.check_event(StreamEndEvent) - - def get_node(self): - # Get the root node of the next document. - if not self.check_event(StreamEndEvent): - return self.compose_document() - - def get_single_node(self): - # Drop the STREAM-START event. - self.get_event() - - # Compose a document if the stream is not empty. - document = None - if not self.check_event(StreamEndEvent): - document = self.compose_document() - - # Ensure that the stream contains no more documents. - if not self.check_event(StreamEndEvent): - event = self.get_event() - raise ComposerError("expected a single document in the stream", - document.start_mark, "but found another document", - event.start_mark) - - # Drop the STREAM-END event. 
- self.get_event() - - return document - - def compose_document(self): - # Drop the DOCUMENT-START event. - self.get_event() - - # Compose the root node. - node = self.compose_node(None, None) - - # Drop the DOCUMENT-END event. - self.get_event() - - return node - - def compose_node(self, parent, index): - self.descend_resolver(parent, index) - if self.check_event(ScalarEvent): - node = self.compose_scalar_node() - elif self.check_event(SequenceStartEvent): - node = self.compose_sequence_node() - elif self.check_event(MappingStartEvent): - node = self.compose_mapping_node() - self.ascend_resolver() - return node - - def compose_scalar_node(self): - event = self.get_event() - tag = event.tag - if tag is None or tag == '!': - tag = self.resolve(ScalarNode, event.value, event.implicit, event.start_mark) - node = ScalarNode(tag, event.value, - event.start_mark, event.end_mark, style=event.style) - return node - - def compose_sequence_node(self): - start_event = self.get_event() - tag = start_event.tag - if tag is None or tag == '!': - tag = self.resolve(SequenceNode, None, start_event.implicit) - node = SequenceNode(tag, [], - start_event.start_mark, None, - flow_style=start_event.flow_style) - index = 0 - while not self.check_event(SequenceEndEvent): - node.value.append(self.compose_node(node, index)) - index += 1 - end_event = self.get_event() - node.end_mark = end_event.end_mark - return node - - def compose_mapping_node(self): - start_event = self.get_event() - tag = start_event.tag - if tag is None or tag == '!': - tag = self.resolve(MappingNode, None, start_event.implicit) - node = MappingNode(tag, [], - start_event.start_mark, None, - flow_style=start_event.flow_style) - while not self.check_event(MappingEndEvent): - #key_event = self.peek_event() - item_key = self.compose_node(node, None) - #if item_key in node.value: - # raise ComposerError("while composing a mapping", start_event.start_mark, - # "found duplicate key", key_event.start_mark) - item_value = self.compose_node(node, item_key) - #node.value[item_key] = item_value - node.value.append((item_key, item_value)) - end_event = self.get_event() - node.end_mark = end_event.end_mark - return node diff --git a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/constructor.py b/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/constructor.py deleted file mode 100644 index bdc5c6e..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/constructor.py +++ /dev/null @@ -1,274 +0,0 @@ -__all__ = ['BaseConstructor', 'Constructor', 'ConstructorError'] - -from .error import MarkedError -from .nodes import * # NOQA -from .markedvalue import gen_marked_value - -import collections -import types - -from functools import wraps - - -try: - from __builtin__ import unicode -except ImportError: - unicode = str # NOQA - - -def marked(func): - @wraps(func) - def f(self, node, *args, **kwargs): - return gen_marked_value(func(self, node, *args, **kwargs), node.start_mark) - return f - - -class ConstructorError(MarkedError): - pass - - -class BaseConstructor: - yaml_constructors = {} - - def __init__(self): - self.constructed_objects = {} - self.state_generators = [] - self.deep_construct = False - - def check_data(self): - # If there are more documents available? - return self.check_node() - - def get_data(self): - # Construct and return the next document. 
- if self.check_node(): - return self.construct_document(self.get_node()) - - def get_single_data(self): - # Ensure that the stream contains a single document and construct it. - node = self.get_single_node() - if node is not None: - return self.construct_document(node) - return None - - def construct_document(self, node): - data = self.construct_object(node) - while self.state_generators: - state_generators = self.state_generators - self.state_generators = [] - for generator in state_generators: - for dummy in generator: - pass - self.constructed_objects = {} - self.deep_construct = False - return data - - def construct_object(self, node, deep=False): - if node in self.constructed_objects: - return self.constructed_objects[node] - if deep: - old_deep = self.deep_construct - self.deep_construct = True - constructor = None - tag_suffix = None - if node.tag in self.yaml_constructors: - constructor = self.yaml_constructors[node.tag] - else: - raise ConstructorError(None, None, 'no constructor for tag %s' % node.tag) - if tag_suffix is None: - data = constructor(self, node) - else: - data = constructor(self, tag_suffix, node) - if isinstance(data, types.GeneratorType): - generator = data - data = next(generator) - if self.deep_construct: - for dummy in generator: - pass - else: - self.state_generators.append(generator) - self.constructed_objects[node] = data - if deep: - self.deep_construct = old_deep - return data - - @marked - def construct_scalar(self, node): - if not isinstance(node, ScalarNode): - raise ConstructorError(None, None, - "expected a scalar node, but found %s" % node.id, - node.start_mark) - return node.value - - def construct_sequence(self, node, deep=False): - if not isinstance(node, SequenceNode): - raise ConstructorError(None, None, - "expected a sequence node, but found %s" % node.id, - node.start_mark) - return [self.construct_object(child, deep=deep) - for child in node.value] - - @marked - def construct_mapping(self, node, deep=False): - if not isinstance(node, MappingNode): - raise ConstructorError(None, None, - "expected a mapping node, but found %s" % node.id, - node.start_mark) - mapping = {} - for key_node, value_node in node.value: - key = self.construct_object(key_node, deep=deep) - if not isinstance(key, collections.Hashable): - self.echoerr('While constructing a mapping', node.start_mark, - 'found unhashable key', key_node.start_mark) - continue - elif type(key.value) != unicode: - self.echoerr('Error while constructing a mapping', node.start_mark, - 'found key that is not a string', key_node.start_mark) - continue - elif key in mapping: - self.echoerr('Error while constructing a mapping', node.start_mark, - 'found duplicate key', key_node.start_mark) - continue - value = self.construct_object(value_node, deep=deep) - mapping[key] = value - return mapping - - @classmethod - def add_constructor(cls, tag, constructor): - if not 'yaml_constructors' in cls.__dict__: - cls.yaml_constructors = cls.yaml_constructors.copy() - cls.yaml_constructors[tag] = constructor - - -class Constructor(BaseConstructor): - def construct_scalar(self, node): - if isinstance(node, MappingNode): - for key_node, value_node in node.value: - if key_node.tag == 'tag:yaml.org,2002:value': - return self.construct_scalar(value_node) - return BaseConstructor.construct_scalar(self, node) - - def flatten_mapping(self, node): - merge = [] - index = 0 - while index < len(node.value): - key_node, value_node = node.value[index] - if key_node.tag == 'tag:yaml.org,2002:merge': - del node.value[index] - 
if isinstance(value_node, MappingNode): - self.flatten_mapping(value_node) - merge.extend(value_node.value) - elif isinstance(value_node, SequenceNode): - submerge = [] - for subnode in value_node.value: - if not isinstance(subnode, MappingNode): - raise ConstructorError("while constructing a mapping", - node.start_mark, - "expected a mapping for merging, but found %s" - % subnode.id, subnode.start_mark) - self.flatten_mapping(subnode) - submerge.append(subnode.value) - submerge.reverse() - for value in submerge: - merge.extend(value) - else: - raise ConstructorError("while constructing a mapping", node.start_mark, - "expected a mapping or list of mappings for merging, but found %s" - % value_node.id, value_node.start_mark) - elif key_node.tag == 'tag:yaml.org,2002:value': - key_node.tag = 'tag:yaml.org,2002:str' - index += 1 - else: - index += 1 - if merge: - node.value = merge + node.value - - def construct_mapping(self, node, deep=False): - if isinstance(node, MappingNode): - self.flatten_mapping(node) - return BaseConstructor.construct_mapping(self, node, deep=deep) - - @marked - def construct_yaml_null(self, node): - self.construct_scalar(node) - return None - - @marked - def construct_yaml_bool(self, node): - value = self.construct_scalar(node).value - return bool(value) - - @marked - def construct_yaml_int(self, node): - value = self.construct_scalar(node).value - sign = +1 - if value[0] == '-': - sign = -1 - if value[0] in '+-': - value = value[1:] - if value == '0': - return 0 - else: - return sign * int(value) - - @marked - def construct_yaml_float(self, node): - value = self.construct_scalar(node).value - sign = +1 - if value[0] == '-': - sign = -1 - if value[0] in '+-': - value = value[1:] - else: - return sign * float(value) - - def construct_yaml_str(self, node): - return self.construct_scalar(node) - - def construct_yaml_seq(self, node): - data = gen_marked_value([], node.start_mark) - yield data - data.extend(self.construct_sequence(node)) - - def construct_yaml_map(self, node): - data = gen_marked_value({}, node.start_mark) - yield data - value = self.construct_mapping(node) - data.update(value) - - def construct_undefined(self, node): - raise ConstructorError(None, None, - "could not determine a constructor for the tag %r" % node.tag, - node.start_mark) - - -Constructor.add_constructor( - 'tag:yaml.org,2002:null', - Constructor.construct_yaml_null) - -Constructor.add_constructor( - 'tag:yaml.org,2002:bool', - Constructor.construct_yaml_bool) - -Constructor.add_constructor( - 'tag:yaml.org,2002:int', - Constructor.construct_yaml_int) - -Constructor.add_constructor( - 'tag:yaml.org,2002:float', - Constructor.construct_yaml_float) - -Constructor.add_constructor( - 'tag:yaml.org,2002:str', - Constructor.construct_yaml_str) - -Constructor.add_constructor( - 'tag:yaml.org,2002:seq', - Constructor.construct_yaml_seq) - -Constructor.add_constructor( - 'tag:yaml.org,2002:map', - Constructor.construct_yaml_map) - -Constructor.add_constructor(None, - Constructor.construct_undefined) diff --git a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/error.py b/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/error.py deleted file mode 100644 index d146667..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/error.py +++ /dev/null @@ -1,99 +0,0 @@ -__all__ = ['Mark', 'MarkedError', 'echoerr', 'NON_PRINTABLE'] - - -import sys -import re - -try: - from __builtin__ import unichr -except ImportError: - unichr = chr # NOQA - - 
-NON_PRINTABLE = re.compile('[^\t\n\x20-\x7E' + unichr(0x85) + (unichr(0xA0) + '-' + unichr(0xD7FF)) + (unichr(0xE000) + '-' + unichr(0xFFFD)) + ']') - - -def repl(s): - return '' % ord(s.group()) - - -def strtrans(s): - return NON_PRINTABLE.sub(repl, s.replace('\t', '>---')) - - -class Mark: - def __init__(self, name, line, column, buffer, pointer): - self.name = name - self.line = line - self.column = column - self.buffer = buffer - self.pointer = pointer - - def copy(self): - return Mark(self.name, self.line, self.column, self.buffer, self.pointer) - - def get_snippet(self, indent=4, max_length=75): - if self.buffer is None: - return None - head = '' - start = self.pointer - while start > 0 and self.buffer[start - 1] not in '\0\n': - start -= 1 - if self.pointer - start > max_length / 2 - 1: - head = ' ... ' - start += 5 - break - tail = '' - end = self.pointer - while end < len(self.buffer) and self.buffer[end] not in '\0\n': - end += 1 - if end - self.pointer > max_length / 2 - 1: - tail = ' ... ' - end -= 5 - break - snippet = [self.buffer[start:self.pointer], self.buffer[self.pointer], self.buffer[self.pointer + 1:end]] - snippet = [strtrans(s) for s in snippet] - return ' ' * indent + head + ''.join(snippet) + tail + '\n' \ - + ' ' * (indent + len(head) + len(snippet[0])) + '^' - - def __str__(self): - snippet = self.get_snippet() - where = " in \"%s\", line %d, column %d" \ - % (self.name, self.line + 1, self.column + 1) - if snippet is not None: - where += ":\n" + snippet - if type(where) is str: - return where - else: - return where.encode('utf-8') - - -def echoerr(*args, **kwargs): - sys.stderr.write('\n') - sys.stderr.write(format_error(*args, **kwargs) + '\n') - - -def format_error(context=None, context_mark=None, problem=None, problem_mark=None, note=None): - lines = [] - if context is not None: - lines.append(context) - if context_mark is not None \ - and (problem is None or problem_mark is None - or context_mark.name != problem_mark.name - or context_mark.line != problem_mark.line - or context_mark.column != problem_mark.column): - lines.append(str(context_mark)) - if problem is not None: - lines.append(problem) - if problem_mark is not None: - lines.append(str(problem_mark)) - if note is not None: - lines.append(note) - return '\n'.join(lines) - - -class MarkedError(Exception): - def __init__(self, context=None, context_mark=None, - problem=None, problem_mark=None, note=None): - Exception.__init__(self, format_error(context, context_mark, problem, - problem_mark, note)) diff --git a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/events.py b/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/events.py deleted file mode 100644 index 47e2667..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/events.py +++ /dev/null @@ -1,97 +0,0 @@ -# Abstract classes. 
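Mark.get_snippet above is what turns a buffer offset into the caret-pointer excerpt printed with lint errors. A rough standalone rendition of the same idea, without the head/tail truncation, is sketched below; make_snippet is an invented name.

def make_snippet(buffer, pointer, indent=4):
    # Walk back and forward to the boundaries of the enclosing line,
    # just as get_snippet does.
    start = pointer
    while start > 0 and buffer[start - 1] not in '\0\n':
        start -= 1
    end = pointer
    while end < len(buffer) and buffer[end] not in '\0\n':
        end += 1
    # First line: the offending text; second line: a caret under the column.
    return (' ' * indent + buffer[start:end] + '\n'
            + ' ' * (indent + pointer - start) + '^')


if __name__ == '__main__':
    buf = '{"colors": {"bg": oops}}'
    print(make_snippet(buf, buf.index('oops')))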
- - -class Event(object): - def __init__(self, start_mark=None, end_mark=None): - self.start_mark = start_mark - self.end_mark = end_mark - - def __repr__(self): - attributes = [key for key in ['implicit', 'value'] - if hasattr(self, key)] - arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) - for key in attributes]) - return '%s(%s)' % (self.__class__.__name__, arguments) - - -class NodeEvent(Event): - def __init__(self, start_mark=None, end_mark=None): - self.start_mark = start_mark - self.end_mark = end_mark - - -class CollectionStartEvent(NodeEvent): - def __init__(self, implicit, start_mark=None, end_mark=None, - flow_style=None): - self.tag = None - self.implicit = implicit - self.start_mark = start_mark - self.end_mark = end_mark - self.flow_style = flow_style - - -class CollectionEndEvent(Event): - pass - - -# Implementations. - - -class StreamStartEvent(Event): - def __init__(self, start_mark=None, end_mark=None, encoding=None): - self.start_mark = start_mark - self.end_mark = end_mark - self.encoding = encoding - - -class StreamEndEvent(Event): - pass - - -class DocumentStartEvent(Event): - def __init__(self, start_mark=None, end_mark=None, - explicit=None, version=None, tags=None): - self.start_mark = start_mark - self.end_mark = end_mark - self.explicit = explicit - self.version = version - self.tags = tags - - -class DocumentEndEvent(Event): - def __init__(self, start_mark=None, end_mark=None, - explicit=None): - self.start_mark = start_mark - self.end_mark = end_mark - self.explicit = explicit - - -class AliasEvent(NodeEvent): - pass - - -class ScalarEvent(NodeEvent): - def __init__(self, implicit, value, - start_mark=None, end_mark=None, style=None): - self.tag = None - self.implicit = implicit - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - self.style = style - - -class SequenceStartEvent(CollectionStartEvent): - pass - - -class SequenceEndEvent(CollectionEndEvent): - pass - - -class MappingStartEvent(CollectionStartEvent): - pass - - -class MappingEndEvent(CollectionEndEvent): - pass diff --git a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/loader.py b/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/loader.py deleted file mode 100644 index 50ae6d0..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/loader.py +++ /dev/null @@ -1,24 +0,0 @@ -__all__ = ['Loader'] - -from .reader import Reader -from .scanner import Scanner -from .parser import Parser -from .composer import Composer -from .constructor import Constructor -from .resolver import Resolver -from .error import echoerr - - -class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver): - def __init__(self, stream): - Reader.__init__(self, stream) - Scanner.__init__(self) - Parser.__init__(self) - Composer.__init__(self) - Constructor.__init__(self) - Resolver.__init__(self) - self.haserrors = False - - def echoerr(self, *args, **kwargs): - echoerr(*args, **kwargs) - self.haserrors = True diff --git a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/markedvalue.py b/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/markedvalue.py deleted file mode 100644 index 6a304b9..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/markedvalue.py +++ /dev/null @@ -1,83 +0,0 @@ -__all__ = ['gen_marked_value', 'MarkedValue'] - - -try: - from __builtin__ import unicode -except ImportError: - unicode = str - - -def gen_new(cls): - def 
__new__(arg_cls, value, mark): - r = super(arg_cls, arg_cls).__new__(arg_cls, value) - r.mark = mark - r.value = value - return r - return __new__ - - -class MarkedUnicode(unicode): - __new__ = gen_new(unicode) - - def _proc_partition(self, part_result): - pointdiff = 1 - r = [] - for s in part_result: - mark = self.mark.copy() - # XXX Does not work properly with escaped strings, but this requires - # saving much more information in mark. - mark.column += pointdiff - mark.pointer += pointdiff - r.append(MarkedUnicode(s, mark)) - pointdiff += len(s) - return tuple(r) - - def rpartition(self, sep): - return self._proc_partition(super(MarkedUnicode, self).rpartition(sep)) - - def partition(self, sep): - return self._proc_partition(super(MarkedUnicode, self).partition(sep)) - - -class MarkedInt(int): - __new__ = gen_new(int) - - -class MarkedFloat(float): - __new__ = gen_new(float) - - -class MarkedValue: - def __init__(self, value, mark): - self.mark = mark - self.value = value - - -specialclasses = { - unicode: MarkedUnicode, - int: MarkedInt, - float: MarkedFloat, -} - -classcache = {} - - -def gen_marked_value(value, mark, use_special_classes=True): - if use_special_classes and value.__class__ in specialclasses: - Marked = specialclasses[value.__class__] - elif value.__class__ in classcache: - Marked = classcache[value.__class__] - else: - class Marked(MarkedValue): - for func in value.__class__.__dict__: - if func not in set(('__init__', '__new__', '__getattribute__')): - if func in set(('__eq__',)): - # HACK to make marked dictionaries always work - exec (('def {0}(self, *args):\n' - ' return self.value.{0}(*[arg.value if isinstance(arg, MarkedValue) else arg for arg in args])').format(func)) - else: - exec (('def {0}(self, *args, **kwargs):\n' - ' return self.value.{0}(*args, **kwargs)\n').format(func)) - classcache[value.__class__] = Marked - - return Marked(value, mark) diff --git a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/nodes.py b/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/nodes.py deleted file mode 100644 index 11ebb3e..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/nodes.py +++ /dev/null @@ -1,53 +0,0 @@ -class Node(object): - def __init__(self, tag, value, start_mark, end_mark): - self.tag = tag - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - - def __repr__(self): - value = self.value - #if isinstance(value, list): - # if len(value) == 0: - # value = '' - # elif len(value) == 1: - # value = '<1 item>' - # else: - # value = '<%d items>' % len(value) - #else: - # if len(value) > 75: - # value = repr(value[:70]+u' ... 
') - # else: - # value = repr(value) - value = repr(value) - return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) - - -class ScalarNode(Node): - id = 'scalar' - - def __init__(self, tag, value, - start_mark=None, end_mark=None, style=None): - self.tag = tag - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - self.style = style - - -class CollectionNode(Node): - def __init__(self, tag, value, - start_mark=None, end_mark=None, flow_style=None): - self.tag = tag - self.value = value - self.start_mark = start_mark - self.end_mark = end_mark - self.flow_style = flow_style - - -class SequenceNode(CollectionNode): - id = 'sequence' - - -class MappingNode(CollectionNode): - id = 'mapping' diff --git a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/parser.py b/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/parser.py deleted file mode 100644 index 998de6d..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/parser.py +++ /dev/null @@ -1,240 +0,0 @@ -__all__ = ['Parser', 'ParserError'] - -from .error import MarkedError -from .tokens import * # NOQA -from .events import * # NOQA - - -class ParserError(MarkedError): - pass - - -class Parser: - def __init__(self): - self.current_event = None - self.yaml_version = None - self.states = [] - self.marks = [] - self.state = self.parse_stream_start - - def dispose(self): - # Reset the state attributes (to clear self-references) - self.states = [] - self.state = None - - def check_event(self, *choices): - # Check the type of the next event. - if self.current_event is None: - if self.state: - self.current_event = self.state() - if self.current_event is not None: - if not choices: - return True - for choice in choices: - if isinstance(self.current_event, choice): - return True - return False - - def peek_event(self): - # Get the next event. - if self.current_event is None: - if self.state: - self.current_event = self.state() - return self.current_event - - def get_event(self): - # Get the next event and proceed further. - if self.current_event is None: - if self.state: - self.current_event = self.state() - value = self.current_event - self.current_event = None - return value - - # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END - # implicit_document ::= block_node DOCUMENT-END* - # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* - - def parse_stream_start(self): - # Parse the stream start. - token = self.get_token() - event = StreamStartEvent(token.start_mark, token.end_mark, - encoding=token.encoding) - - # Prepare the next state. - self.state = self.parse_implicit_document_start - - return event - - def parse_implicit_document_start(self): - # Parse an implicit document. - if not self.check_token(StreamEndToken): - token = self.peek_token() - start_mark = end_mark = token.start_mark - event = DocumentStartEvent(start_mark, end_mark, explicit=False) - - # Prepare the next state. - self.states.append(self.parse_document_end) - self.state = self.parse_node - - return event - - else: - return self.parse_document_start() - - def parse_document_start(self): - # Parse an explicit document. - if not self.check_token(StreamEndToken): - token = self.peek_token() - self.echoerr(None, None, - "expected '', but found %r" % token.id, - token.start_mark) - return StreamEndEvent(token.start_mark, token.end_mark) - else: - # Parse the end of the stream. 
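The check_event/peek_event/get_event trio above is a one-event lookahead over a lazily advanced state machine. A stripped-down sketch of that consumption pattern, driven by a plain callable instead of the real token stream, could look like this; EventBuffer is an invented name.

class EventBuffer:
    # One-slot lookahead, mirroring how the parser caches current_event.
    def __init__(self, produce):
        self._produce = produce      # callable returning the next event or None
        self._current = None

    def peek(self):
        if self._current is None:
            self._current = self._produce()
        return self._current

    def check(self, *types):
        event = self.peek()
        return event is not None and (not types or isinstance(event, types))

    def get(self):
        event = self.peek()
        self._current = None
        return event


if __name__ == '__main__':
    events = iter(['stream-start', 'scalar', 'stream-end'])
    buf = EventBuffer(lambda: next(events, None))
    while buf.check(str):
        print(buf.get())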
- token = self.get_token() - event = StreamEndEvent(token.start_mark, token.end_mark) - assert not self.states - assert not self.marks - self.state = None - return event - - def parse_document_end(self): - # Parse the document end. - token = self.peek_token() - start_mark = end_mark = token.start_mark - explicit = False - event = DocumentEndEvent(start_mark, end_mark, explicit=explicit) - - # Prepare the next state. - self.state = self.parse_document_start - - return event - - def parse_document_content(self): - return self.parse_node() - - def parse_node(self, indentless_sequence=False): - start_mark = end_mark = None - if start_mark is None: - start_mark = end_mark = self.peek_token().start_mark - event = None - implicit = True - if self.check_token(ScalarToken): - token = self.get_token() - end_mark = token.end_mark - if token.plain: - implicit = (True, False) - else: - implicit = (False, True) - event = ScalarEvent(implicit, token.value, - start_mark, end_mark, style=token.style) - self.state = self.states.pop() - elif self.check_token(FlowSequenceStartToken): - end_mark = self.peek_token().end_mark - event = SequenceStartEvent(implicit, - start_mark, end_mark, flow_style=True) - self.state = self.parse_flow_sequence_first_entry - elif self.check_token(FlowMappingStartToken): - end_mark = self.peek_token().end_mark - event = MappingStartEvent(implicit, - start_mark, end_mark, flow_style=True) - self.state = self.parse_flow_mapping_first_key - else: - token = self.peek_token() - raise ParserError("while parsing a flow node", start_mark, - "expected the node content, but found %r" % token.id, - token.start_mark) - return event - - def parse_flow_sequence_first_entry(self): - token = self.get_token() - self.marks.append(token.start_mark) - return self.parse_flow_sequence_entry(first=True) - - def parse_flow_sequence_entry(self, first=False): - if not self.check_token(FlowSequenceEndToken): - if not first: - if self.check_token(FlowEntryToken): - self.get_token() - if self.check_token(FlowSequenceEndToken): - token = self.peek_token() - self.echoerr("While parsing a flow sequence", self.marks[-1], - "expected sequence value, but got %r" % token.id, token.start_mark) - else: - token = self.peek_token() - raise ParserError("while parsing a flow sequence", self.marks[-1], - "expected ',' or ']', but got %r" % token.id, token.start_mark) - - if not self.check_token(FlowSequenceEndToken): - self.states.append(self.parse_flow_sequence_entry) - return self.parse_node() - token = self.get_token() - event = SequenceEndEvent(token.start_mark, token.end_mark) - self.state = self.states.pop() - self.marks.pop() - return event - - def parse_flow_sequence_entry_mapping_end(self): - self.state = self.parse_flow_sequence_entry - token = self.peek_token() - return MappingEndEvent(token.start_mark, token.start_mark) - - def parse_flow_mapping_first_key(self): - token = self.get_token() - self.marks.append(token.start_mark) - return self.parse_flow_mapping_key(first=True) - - def parse_flow_mapping_key(self, first=False): - if not self.check_token(FlowMappingEndToken): - if not first: - if self.check_token(FlowEntryToken): - self.get_token() - if self.check_token(FlowMappingEndToken): - token = self.peek_token() - self.echoerr("While parsing a flow mapping", self.marks[-1], - "expected mapping key, but got %r" % token.id, token.start_mark) - else: - token = self.peek_token() - raise ParserError("while parsing a flow mapping", self.marks[-1], - "expected ',' or '}', but got %r" % token.id, token.start_mark) - 
if self.check_token(KeyToken): - token = self.get_token() - if not self.check_token(ValueToken, - FlowEntryToken, FlowMappingEndToken): - self.states.append(self.parse_flow_mapping_value) - return self.parse_node() - else: - token = self.peek_token() - raise ParserError("while parsing a flow mapping", self.marks[-1], - "expected value, but got %r" % token.id, token.start_mark) - elif not self.check_token(FlowMappingEndToken): - token = self.peek_token() - expect_key = self.check_token(ValueToken, FlowEntryToken) - if not expect_key: - self.get_token() - expect_key = self.check_token(ValueToken) - - if expect_key: - raise ParserError("while parsing a flow mapping", self.marks[-1], - "expected string key, but got %r" % token.id, token.start_mark) - else: - token = self.peek_token() - raise ParserError("while parsing a flow mapping", self.marks[-1], - "expected ':', but got %r" % token.id, token.start_mark) - token = self.get_token() - event = MappingEndEvent(token.start_mark, token.end_mark) - self.state = self.states.pop() - self.marks.pop() - return event - - def parse_flow_mapping_value(self): - if self.check_token(ValueToken): - token = self.get_token() - if not self.check_token(FlowEntryToken, FlowMappingEndToken): - self.states.append(self.parse_flow_mapping_key) - return self.parse_node() - - token = self.peek_token() - raise ParserError("while parsing a flow mapping", self.marks[-1], - "expected mapping value, but got %r" % token.id, token.start_mark) diff --git a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/reader.py b/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/reader.py deleted file mode 100644 index f59605e..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/reader.py +++ /dev/null @@ -1,135 +0,0 @@ -# This module contains abstractions for the input stream. You don't have to -# looks further, there are no pretty code. - -__all__ = ['Reader', 'ReaderError'] - -from .error import MarkedError, Mark, NON_PRINTABLE - -import codecs - -try: - from __builtin__ import unicode -except ImportError: - unicode = str # NOQA - - -class ReaderError(MarkedError): - pass - - -class Reader(object): - # Reader: - # - determines the data encoding and converts it to a unicode string, - # - checks if characters are in allowed range, - # - adds '\0' to the end. - - # Reader accepts - # - a file-like object with its `read` method returning `str`, - - # Yeah, it's ugly and slow. 
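The Reader described in the comments above exposes peek()/prefix()/forward() over a decoded buffer terminated by a NUL character. A toy in-memory approximation, with no incremental decoding and no mark tracking, is sketched below; StringReader is an invented name.

class StringReader:
    # Stand-in for the stream Reader: the whole input is already decoded
    # and a trailing NUL marks the end of the stream.
    def __init__(self, text):
        self.buffer = text + '\0'
        self.pointer = 0

    def peek(self, index=0):
        return self.buffer[self.pointer + index]

    def prefix(self, length=1):
        return self.buffer[self.pointer:self.pointer + length]

    def forward(self, length=1):
        self.pointer += length


if __name__ == '__main__':
    r = StringReader('{"a": 1}')
    assert r.peek() == '{'
    assert r.prefix(4) == '{"a"'
    r.forward(4)
    assert r.peek() == ':'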
- def __init__(self, stream): - self.name = None - self.stream = None - self.stream_pointer = 0 - self.eof = True - self.buffer = '' - self.pointer = 0 - self.full_buffer = unicode('') - self.full_pointer = 0 - self.raw_buffer = None - self.raw_decode = codecs.utf_8_decode - self.encoding = 'utf-8' - self.index = 0 - self.line = 0 - self.column = 0 - - self.stream = stream - self.name = getattr(stream, 'name', "") - self.eof = False - self.raw_buffer = None - - while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2): - self.update_raw() - self.update(1) - - def peek(self, index=0): - try: - return self.buffer[self.pointer + index] - except IndexError: - self.update(index + 1) - return self.buffer[self.pointer + index] - - def prefix(self, length=1): - if self.pointer + length >= len(self.buffer): - self.update(length) - return self.buffer[self.pointer:self.pointer + length] - - def update_pointer(self, length): - while length: - ch = self.buffer[self.pointer] - self.pointer += 1 - self.full_pointer += 1 - self.index += 1 - if ch == '\n': - self.line += 1 - self.column = 0 - else: - self.column += 1 - length -= 1 - - def forward(self, length=1): - if self.pointer + length + 1 >= len(self.buffer): - self.update(length + 1) - self.update_pointer(length) - - def get_mark(self): - return Mark(self.name, self.line, self.column, self.full_buffer, self.full_pointer) - - def check_printable(self, data): - match = NON_PRINTABLE.search(data) - if match: - self.update_pointer(match.start()) - raise ReaderError('while reading from stream', None, - 'found special characters which are not allowed', - Mark(self.name, self.line, self.column, self.full_buffer, self.full_pointer)) - - def update(self, length): - if self.raw_buffer is None: - return - self.buffer = self.buffer[self.pointer:] - self.pointer = 0 - while len(self.buffer) < length: - if not self.eof: - self.update_raw() - try: - data, converted = self.raw_decode(self.raw_buffer, - 'strict', self.eof) - except UnicodeDecodeError as exc: - character = self.raw_buffer[exc.start] - position = self.stream_pointer - len(self.raw_buffer) + exc.start - data, converted = self.raw_decode(self.raw_buffer[:exc.start], 'strict', self.eof) - self.buffer += data - self.full_buffer += data + '<' + str(ord(character)) + '>' - self.raw_buffer = self.raw_buffer[converted:] - self.update_pointer(exc.start - 1) - raise ReaderError('while reading from stream', None, - 'found character #x%04x that cannot be decoded by UTF-8 codec' % ord(character), - Mark(self.name, self.line, self.column, self.full_buffer, position)) - self.buffer += data - self.full_buffer += data - self.raw_buffer = self.raw_buffer[converted:] - self.check_printable(data) - if self.eof: - self.buffer += '\0' - self.raw_buffer = None - break - - def update_raw(self, size=4096): - data = self.stream.read(size) - if self.raw_buffer is None: - self.raw_buffer = data - else: - self.raw_buffer += data - self.stream_pointer += len(data) - if not data: - self.eof = True diff --git a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/resolver.py b/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/resolver.py deleted file mode 100644 index f628a87..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/resolver.py +++ /dev/null @@ -1,131 +0,0 @@ -__all__ = ['BaseResolver', 'Resolver'] - -from .error import MarkedError -from .nodes import * # NOQA - -import re - - -class ResolverError(MarkedError): - pass - - -class 
BaseResolver: - DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str' - DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq' - DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map' - - yaml_implicit_resolvers = {} - yaml_path_resolvers = {} - - def __init__(self): - self.resolver_exact_paths = [] - self.resolver_prefix_paths = [] - - @classmethod - def add_implicit_resolver(cls, tag, regexp, first): - if not 'yaml_implicit_resolvers' in cls.__dict__: - cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy() - if first is None: - first = [None] - for ch in first: - cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) - - def descend_resolver(self, current_node, current_index): - if not self.yaml_path_resolvers: - return - exact_paths = {} - prefix_paths = [] - if current_node: - depth = len(self.resolver_prefix_paths) - for path, kind in self.resolver_prefix_paths[-1]: - if self.check_resolver_prefix(depth, path, kind, - current_node, current_index): - if len(path) > depth: - prefix_paths.append((path, kind)) - else: - exact_paths[kind] = self.yaml_path_resolvers[path, kind] - else: - for path, kind in self.yaml_path_resolvers: - if not path: - exact_paths[kind] = self.yaml_path_resolvers[path, kind] - else: - prefix_paths.append((path, kind)) - self.resolver_exact_paths.append(exact_paths) - self.resolver_prefix_paths.append(prefix_paths) - - def ascend_resolver(self): - if not self.yaml_path_resolvers: - return - self.resolver_exact_paths.pop() - self.resolver_prefix_paths.pop() - - def check_resolver_prefix(self, depth, path, kind, - current_node, current_index): - node_check, index_check = path[depth - 1] - if isinstance(node_check, str): - if current_node.tag != node_check: - return - elif node_check is not None: - if not isinstance(current_node, node_check): - return - if index_check is True and current_index is not None: - return - if (index_check is False or index_check is None) \ - and current_index is None: - return - if isinstance(index_check, str): - if not (isinstance(current_index, ScalarNode) - and index_check == current_index.value): - return - elif isinstance(index_check, int) and not isinstance(index_check, bool): - if index_check != current_index: - return - return True - - def resolve(self, kind, value, implicit, mark=None): - if kind is ScalarNode and implicit[0]: - if value == '': - resolvers = self.yaml_implicit_resolvers.get('', []) - else: - resolvers = self.yaml_implicit_resolvers.get(value[0], []) - resolvers += self.yaml_implicit_resolvers.get(None, []) - for tag, regexp in resolvers: - if regexp.match(value): - return tag - else: - self.echoerr('While resolving plain scalar', None, - 'expected floating-point value, integer, null or boolean, but got %r' % value, - mark) - return self.DEFAULT_SCALAR_TAG - if kind is ScalarNode: - return self.DEFAULT_SCALAR_TAG - elif kind is SequenceNode: - return self.DEFAULT_SEQUENCE_TAG - elif kind is MappingNode: - return self.DEFAULT_MAPPING_TAG - - -class Resolver(BaseResolver): - pass - - -Resolver.add_implicit_resolver( - 'tag:yaml.org,2002:bool', - re.compile(r'''^(?:true|false)$''', re.X), - list('yYnNtTfFoO')) - -Resolver.add_implicit_resolver( - 'tag:yaml.org,2002:float', - re.compile(r'^-?(?:0|[1-9]\d*)(?=[.eE])(?:\.\d+)?(?:[eE][-+]?\d+)?$', re.X), - list('-0123456789')) - -Resolver.add_implicit_resolver( - 'tag:yaml.org,2002:int', - re.compile(r'^(?:0|-?[1-9]\d*)$', re.X), - list('-0123456789')) - -Resolver.add_implicit_resolver( - 'tag:yaml.org,2002:null', - re.compile(r'^null$', re.X), - ['n']) diff --git 
a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/scanner.py b/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/scanner.py deleted file mode 100644 index 2183f65..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/scanner.py +++ /dev/null @@ -1,474 +0,0 @@ -# Scanner produces tokens of the following types: -# STREAM-START -# STREAM-END -# DOCUMENT-START -# DOCUMENT-END -# FLOW-SEQUENCE-START -# FLOW-MAPPING-START -# FLOW-SEQUENCE-END -# FLOW-MAPPING-END -# FLOW-ENTRY -# KEY -# VALUE -# SCALAR(value, plain, style) -# -# Read comments in the Scanner code for more details. - -__all__ = ['Scanner', 'ScannerError'] - -from .error import MarkedError -from .tokens import * # NOQA - - -class ScannerError(MarkedError): - pass - - -try: - from __builtin__ import unicode -except ImportError: - unicode = str # NOQA - - -class SimpleKey: - # See below simple keys treatment. - def __init__(self, token_number, index, line, column, mark): - self.token_number = token_number - self.index = index - self.line = line - self.column = column - self.mark = mark - - -class Scanner: - def __init__(self): - """Initialize the scanner.""" - # It is assumed that Scanner and Reader will have a common descendant. - # Reader do the dirty work of checking for BOM and converting the - # input data to Unicode. It also adds NUL to the end. - # - # Reader supports the following methods - # self.peek(i=0) # peek the next i-th character - # self.prefix(l=1) # peek the next l characters - # self.forward(l=1) # read the next l characters and move the pointer. - - # Had we reached the end of the stream? - self.done = False - - # The number of unclosed '{' and '['. `flow_level == 0` means block - # context. - self.flow_level = 0 - - # List of processed tokens that are not yet emitted. - self.tokens = [] - - # Add the STREAM-START token. - self.fetch_stream_start() - - # Number of tokens that were emitted through the `get_token` method. - self.tokens_taken = 0 - - # Variables related to simple keys treatment. - - # A simple key is a key that is not denoted by the '?' indicator. - # We emit the KEY token before all keys, so when we find a potential - # simple key, we try to locate the corresponding ':' indicator. - # Simple keys should be limited to a single line. - - # Can a simple key start at the current position? A simple key may - # start: - # - after '{', '[', ',' (in the flow context), - self.allow_simple_key = False - - # Keep track of possible simple keys. This is a dictionary. The key - # is `flow_level`; there can be no more that one possible simple key - # for each level. The value is a SimpleKey record: - # (token_number, index, line, column, mark) - # A simple key may start with SCALAR(flow), '[', or '{' tokens. - self.possible_simple_keys = {} - - # Public methods. - - def check_token(self, *choices): - # Check if the next token is one of the given types. - while self.need_more_tokens(): - self.fetch_more_tokens() - if self.tokens: - if not choices: - return True - for choice in choices: - if isinstance(self.tokens[0], choice): - return True - return False - - def peek_token(self): - # Return the next token, but do not delete if from the queue. - while self.need_more_tokens(): - self.fetch_more_tokens() - if self.tokens: - return self.tokens[0] - - def get_token(self): - # Return the next token. - while self.need_more_tokens(): - self.fetch_more_tokens() - if self.tokens: - self.tokens_taken += 1 - return self.tokens.pop(0) - - # Private methods. 
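For reference, the kind of token stream this scanner emits for a small document can be approximated in a few standalone lines. The sketch below only recognises the flow punctuation, double-quoted strings and bare words named in the header comment above, and omits the KEY tokens inserted by the simple-key machinery; toy_token_names is an invented name.

PUNCT = {'[': 'FLOW-SEQUENCE-START', ']': 'FLOW-SEQUENCE-END',
         '{': 'FLOW-MAPPING-START', '}': 'FLOW-MAPPING-END',
         ',': 'FLOW-ENTRY', ':': 'VALUE'}


def toy_token_names(text):
    names = ['STREAM-START']
    i = 0
    while i < len(text):
        ch = text[i]
        if ch in ' \t\n':
            i += 1
        elif ch in PUNCT:
            names.append(PUNCT[ch])
            i += 1
        elif ch == '"':                      # double-quoted scalar
            j = text.index('"', i + 1)
            names.append('SCALAR(%s)' % text[i + 1:j])
            i = j + 1
        else:                                # plain scalar (numbers, true, ...)
            j = i
            while j < len(text) and text[j] not in ' \t\n,]}:':
                j += 1
            names.append('SCALAR(%s)' % text[i:j])
            i = j
    names.append('STREAM-END')
    return names


if __name__ == '__main__':
    print(toy_token_names('{"x": [1, true]}'))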
- - def need_more_tokens(self): - if self.done: - return False - if not self.tokens: - return True - # The current token may be a potential simple key, so we - # need to look further. - self.stale_possible_simple_keys() - if self.next_possible_simple_key() == self.tokens_taken: - return True - - def fetch_more_tokens(self): - - # Eat whitespaces and comments until we reach the next token. - self.scan_to_next_token() - - # Remove obsolete possible simple keys. - self.stale_possible_simple_keys() - - # Peek the next character. - ch = self.peek() - - # Is it the end of stream? - if ch == '\0': - return self.fetch_stream_end() - - # Note: the order of the following checks is NOT significant. - - # Is it the flow sequence start indicator? - if ch == '[': - return self.fetch_flow_sequence_start() - - # Is it the flow mapping start indicator? - if ch == '{': - return self.fetch_flow_mapping_start() - - # Is it the flow sequence end indicator? - if ch == ']': - return self.fetch_flow_sequence_end() - - # Is it the flow mapping end indicator? - if ch == '}': - return self.fetch_flow_mapping_end() - - # Is it the flow entry indicator? - if ch == ',': - return self.fetch_flow_entry() - - # Is it the value indicator? - if ch == ':' and self.flow_level: - return self.fetch_value() - - # Is it a double quoted scalar? - if ch == '\"': - return self.fetch_double() - - # It must be a plain scalar then. - if self.check_plain(): - return self.fetch_plain() - - # No? It's an error. Let's produce a nice error message. - raise ScannerError("while scanning for the next token", None, - "found character %r that cannot start any token" % ch, - self.get_mark()) - - # Simple keys treatment. - - def next_possible_simple_key(self): - # Return the number of the nearest possible simple key. Actually we - # don't need to loop through the whole dictionary. We may replace it - # with the following code: - # if not self.possible_simple_keys: - # return None - # return self.possible_simple_keys[ - # min(self.possible_simple_keys.keys())].token_number - min_token_number = None - for level in self.possible_simple_keys: - key = self.possible_simple_keys[level] - if min_token_number is None or key.token_number < min_token_number: - min_token_number = key.token_number - return min_token_number - - def stale_possible_simple_keys(self): - # Remove entries that are no longer possible simple keys. According to - # the YAML specification, simple keys - # - should be limited to a single line, - # Disabling this procedure will allow simple keys of any length and - # height (may cause problems if indentation is broken though). - for level in list(self.possible_simple_keys): - key = self.possible_simple_keys[level] - if key.line != self.line: - del self.possible_simple_keys[level] - - def save_possible_simple_key(self): - # The next token may start a simple key. We check if it's possible - # and save its position. This function is called for - # SCALAR(flow), '[', and '{'. - - # The next token might be a simple key. Let's save it's number and - # position. - if self.allow_simple_key: - self.remove_possible_simple_key() - token_number = self.tokens_taken + len(self.tokens) - key = SimpleKey(token_number, - self.index, self.line, self.column, self.get_mark()) - self.possible_simple_keys[self.flow_level] = key - - def remove_possible_simple_key(self): - # Remove the saved possible key position at the current flow level. - if self.flow_level in self.possible_simple_keys: - del self.possible_simple_keys[self.flow_level] - - # Fetchers. 
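fetch_more_tokens() above picks a fetcher from the first character of the upcoming token. The same decision, written as a lookup table purely for illustration (the original is an if/elif chain, and ':' additionally requires being inside a flow collection):

FETCHERS = {
    '\0': 'fetch_stream_end',
    '[': 'fetch_flow_sequence_start',
    '{': 'fetch_flow_mapping_start',
    ']': 'fetch_flow_sequence_end',
    '}': 'fetch_flow_mapping_end',
    ',': 'fetch_flow_entry',
    ':': 'fetch_value',       # in the real scanner only when flow_level > 0
    '"': 'fetch_double',
}


def pick_fetcher(ch):
    if ch in FETCHERS:
        return FETCHERS[ch]
    if ch in '0123456789-ntf':             # same test as check_plain()
        return 'fetch_plain'
    raise ValueError('found character %r that cannot start any token' % ch)


if __name__ == '__main__':
    for ch in '{"5t':
        print(ch, '->', pick_fetcher(ch))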
- - def fetch_stream_start(self): - # We always add STREAM-START as the first token and STREAM-END as the - # last token. - - # Read the token. - mark = self.get_mark() - - # Add STREAM-START. - self.tokens.append(StreamStartToken(mark, mark, - encoding=self.encoding)) - - def fetch_stream_end(self): - # Reset simple keys. - self.remove_possible_simple_key() - self.allow_simple_key = False - self.possible_simple_keys = {} - - # Read the token. - mark = self.get_mark() - - # Add STREAM-END. - self.tokens.append(StreamEndToken(mark, mark)) - - # The steam is finished. - self.done = True - - def fetch_flow_sequence_start(self): - self.fetch_flow_collection_start(FlowSequenceStartToken) - - def fetch_flow_mapping_start(self): - self.fetch_flow_collection_start(FlowMappingStartToken) - - def fetch_flow_collection_start(self, TokenClass): - - # '[' and '{' may start a simple key. - self.save_possible_simple_key() - - # Increase the flow level. - self.flow_level += 1 - - # Simple keys are allowed after '[' and '{'. - self.allow_simple_key = True - - # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(TokenClass(start_mark, end_mark)) - - def fetch_flow_sequence_end(self): - self.fetch_flow_collection_end(FlowSequenceEndToken) - - def fetch_flow_mapping_end(self): - self.fetch_flow_collection_end(FlowMappingEndToken) - - def fetch_flow_collection_end(self, TokenClass): - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Decrease the flow level. - self.flow_level -= 1 - - # No simple keys after ']' or '}'. - self.allow_simple_key = False - - # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(TokenClass(start_mark, end_mark)) - - def fetch_value(self): - # Do we determine a simple key? - if self.flow_level in self.possible_simple_keys: - - # Add KEY. - key = self.possible_simple_keys[self.flow_level] - del self.possible_simple_keys[self.flow_level] - self.tokens.insert(key.token_number - self.tokens_taken, - KeyToken(key.mark, key.mark)) - - # There cannot be two simple keys one after another. - self.allow_simple_key = False - - # Add VALUE. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(ValueToken(start_mark, end_mark)) - - def fetch_flow_entry(self): - - # Simple keys are allowed after ','. - self.allow_simple_key = True - - # Reset possible simple key on the current level. - self.remove_possible_simple_key() - - # Add FLOW-ENTRY. - start_mark = self.get_mark() - self.forward() - end_mark = self.get_mark() - self.tokens.append(FlowEntryToken(start_mark, end_mark)) - - def fetch_double(self): - # A flow scalar could be a simple key. - self.save_possible_simple_key() - - # No simple keys after flow scalars. - self.allow_simple_key = False - - # Scan and add SCALAR. - self.tokens.append(self.scan_flow_scalar()) - - def fetch_plain(self): - - self.save_possible_simple_key() - - # No simple keys after plain scalars. - self.allow_simple_key = False - - # Scan and add SCALAR. May change `allow_simple_key`. - self.tokens.append(self.scan_plain()) - - # Checkers. - - def check_plain(self): - return self.peek() in '0123456789-ntf' - - # Scanners. - - def scan_to_next_token(self): - while self.peek() in ' \t\n': - self.forward() - - def scan_flow_scalar(self): - # See the specification for details. 
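fetch_value() above relies on the position recorded by save_possible_simple_key(): once a ':' arrives, a KEY token is spliced into the queue just before the scalar that turned out to be a key. The sketch below reproduces that splice over plain token-name strings; insert_key_tokens is an invented helper and the bookkeeping is heavily simplified.

def insert_key_tokens(raw_tokens):
    # raw_tokens: token names without KEY entries, e.g. the output of a
    # toy scanner.  When VALUE follows a remembered candidate, a KEY is
    # inserted at the remembered position, as fetch_value() does.
    out = []
    possible_key = None                    # index in `out` of the candidate
    for tok in raw_tokens:
        if tok == 'VALUE' and possible_key is not None:
            out.insert(possible_key, 'KEY')
            possible_key = None
        if tok.startswith('SCALAR') or tok in ('FLOW-MAPPING-START',
                                               'FLOW-SEQUENCE-START'):
            possible_key = len(out)
        elif tok != 'VALUE':
            possible_key = None
        out.append(tok)
    return out


if __name__ == '__main__':
    toks = ['FLOW-MAPPING-START', 'SCALAR(x)', 'VALUE', 'SCALAR(1)',
            'FLOW-MAPPING-END']
    print(insert_key_tokens(toks))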
- # Note that we loose indentation rules for quoted scalars. Quoted - # scalars don't need to adhere indentation because " and ' clearly - # mark the beginning and the end of them. Therefore we are less - # restrictive then the specification requires. We only need to check - # that document separators are not included in scalars. - chunks = [] - start_mark = self.get_mark() - quote = self.peek() - self.forward() - chunks.extend(self.scan_flow_scalar_non_spaces(start_mark)) - while self.peek() != quote: - chunks.extend(self.scan_flow_scalar_spaces(start_mark)) - chunks.extend(self.scan_flow_scalar_non_spaces(start_mark)) - self.forward() - end_mark = self.get_mark() - return ScalarToken(unicode().join(chunks), False, start_mark, end_mark, '"') - - ESCAPE_REPLACEMENTS = { - 'b': '\x08', - 't': '\x09', - 'n': '\x0A', - 'f': '\x0C', - 'r': '\x0D', - '\"': '\"', - '\\': '\\', - } - - ESCAPE_CODES = { - 'u': 4, - } - - def scan_flow_scalar_non_spaces(self, start_mark): - # See the specification for details. - chunks = [] - while True: - length = 0 - while self.peek(length) not in '\"\\\0 \t\n': - length += 1 - if length: - chunks.append(self.prefix(length)) - self.forward(length) - ch = self.peek() - if ch == '\\': - self.forward() - ch = self.peek() - if ch in self.ESCAPE_REPLACEMENTS: - chunks.append(self.ESCAPE_REPLACEMENTS[ch]) - self.forward() - elif ch in self.ESCAPE_CODES: - length = self.ESCAPE_CODES[ch] - self.forward() - for k in range(length): - if self.peek(k) not in '0123456789ABCDEFabcdef': - raise ScannerError("while scanning a double-quoted scalar", start_mark, - "expected escape sequence of %d hexdecimal numbers, but found %r" % - (length, self.peek(k)), self.get_mark()) - code = int(self.prefix(length), 16) - chunks.append(chr(code)) - self.forward(length) - else: - raise ScannerError("while scanning a double-quoted scalar", start_mark, - "found unknown escape character %r" % ch, self.get_mark()) - else: - return chunks - - def scan_flow_scalar_spaces(self, start_mark): - # See the specification for details. 
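The escape handling in scan_flow_scalar_non_spaces() comes down to the ESCAPE_REPLACEMENTS table plus four-digit \uXXXX code points. Below is a standalone sketch over an already-extracted string body; unescape is an invented name and the error handling is minimal.

ESCAPES = {'b': '\x08', 't': '\x09', 'n': '\x0A', 'f': '\x0C',
           'r': '\x0D', '"': '"', '\\': '\\'}


def unescape(body):
    chunks = []
    i = 0
    while i < len(body):
        ch = body[i]
        if ch != '\\':
            chunks.append(ch)
            i += 1
        elif body[i + 1] in ESCAPES:          # single-character escape
            chunks.append(ESCAPES[body[i + 1]])
            i += 2
        elif body[i + 1] == 'u':              # \uXXXX escape
            chunks.append(chr(int(body[i + 2:i + 6], 16)))
            i += 6
        else:
            raise ValueError('found unknown escape character %r' % body[i + 1])
    return ''.join(chunks)


if __name__ == '__main__':
    print(unescape('tab:\\tnl:\\narrow:\\u2192'))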
- chunks = [] - length = 0 - while self.peek(length) in ' \t': - length += 1 - whitespaces = self.prefix(length) - self.forward(length) - ch = self.peek() - if ch == '\0': - raise ScannerError("while scanning a quoted scalar", start_mark, - "found unexpected end of stream", self.get_mark()) - elif ch == '\n': - raise ScannerError("while scanning a quoted scalar", start_mark, - "found unexpected line end", self.get_mark()) - else: - chunks.append(whitespaces) - return chunks - - def scan_plain(self): - chunks = [] - start_mark = self.get_mark() - spaces = [] - while True: - length = 0 - while True: - if self.peek(length) not in 'eE.0123456789nul-tr+fas': - break - length += 1 - if length == 0: - break - self.allow_simple_key = False - chunks.extend(spaces) - chunks.append(self.prefix(length)) - self.forward(length) - end_mark = self.get_mark() - return ScalarToken(''.join(chunks), True, start_mark, end_mark) diff --git a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/tokens.py b/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/tokens.py deleted file mode 100644 index 8c5b38c..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/lint/markedjson/tokens.py +++ /dev/null @@ -1,65 +0,0 @@ -class Token(object): - def __init__(self, start_mark, end_mark): - self.start_mark = start_mark - self.end_mark = end_mark - - def __repr__(self): - attributes = [key for key in self.__dict__ - if not key.endswith('_mark')] - attributes.sort() - arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) - for key in attributes]) - return '%s(%s)' % (self.__class__.__name__, arguments) - - -class StreamStartToken(Token): - id = '' - - def __init__(self, start_mark=None, end_mark=None, - encoding=None): - self.start_mark = start_mark - self.end_mark = end_mark - self.encoding = encoding - - -class StreamEndToken(Token): - id = '' - - -class FlowSequenceStartToken(Token): - id = '[' - - -class FlowMappingStartToken(Token): - id = '{' - - -class FlowSequenceEndToken(Token): - id = ']' - - -class FlowMappingEndToken(Token): - id = '}' - - -class KeyToken(Token): - id = '?' 
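scan_plain() above only collects the characters that can appear in JSON numbers, true, false and null; deciding what such a plain scalar means is the resolver's job, using the implicit-resolver patterns registered on Resolver earlier in this diff. The sketch below combines the two steps; resolve_plain is an invented name, and unlike the real resolver it silently falls back to str instead of reporting an error.

import re

PLAIN_RESOLVERS = (
    ('bool', re.compile(r'^(?:true|false)$')),
    ('float', re.compile(r'^-?(?:0|[1-9]\d*)(?=[.eE])(?:\.\d+)?(?:[eE][-+]?\d+)?$')),
    ('int', re.compile(r'^(?:0|-?[1-9]\d*)$')),
    ('null', re.compile(r'^null$')),
)


def resolve_plain(value):
    # Return the tag suffix matching the scalar text, str otherwise.
    for tag, regexp in PLAIN_RESOLVERS:
        if regexp.match(value):
            return tag
    return 'str'


if __name__ == '__main__':
    for v in ('true', '-12', '3.5e2', 'null', 'oops'):
        print(v, '->', resolve_plain(v))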
- - -class ValueToken(Token): - id = ':' - - -class FlowEntryToken(Token): - id = ',' - - -class ScalarToken(Token): - id = '' - - def __init__(self, value, plain, start_mark, end_mark, style=None): - self.value = value - self.plain = plain - self.start_mark = start_mark - self.end_mark = end_mark - self.style = style diff --git a/common/.local/lib/python2.7/site-packages/powerline/matcher.py b/common/.local/lib/python2.7/site-packages/powerline/matcher.py deleted file mode 100644 index 5578d28..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/matcher.py +++ /dev/null @@ -1,19 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from __future__ import absolute_import -import sys - - -def gen_matcher_getter(ext, import_paths): - def get(match_name): - match_module, separator, match_function = match_name.rpartition('.') - if not separator: - match_module = 'powerline.matchers.{0}'.format(ext) - match_function = match_name - oldpath = sys.path - sys.path = import_paths + sys.path - try: - return getattr(__import__(match_module, fromlist=[match_function]), match_function) - finally: - sys.path = oldpath - return get diff --git a/common/.local/lib/python2.7/site-packages/powerline/matchers/__init__.py b/common/.local/lib/python2.7/site-packages/powerline/matchers/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/common/.local/lib/python2.7/site-packages/powerline/matchers/vim.py b/common/.local/lib/python2.7/site-packages/powerline/matchers/vim.py deleted file mode 100644 index 9d8eec3..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/matchers/vim.py +++ /dev/null @@ -1,19 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from __future__ import absolute_import - -import os -from powerline.bindings.vim import getbufvar - - -def help(matcher_info): - return str(getbufvar(matcher_info['bufnr'], '&buftype')) == 'help' - - -def cmdwin(matcher_info): - name = matcher_info['buffer'].name - return name and os.path.basename(name) == '[Command Line]' - - -def quickfix(matcher_info): - return str(getbufvar(matcher_info['bufnr'], '&buftype')) == 'quickfix' diff --git a/common/.local/lib/python2.7/site-packages/powerline/renderer.py b/common/.local/lib/python2.7/site-packages/powerline/renderer.py deleted file mode 100644 index 848db1e..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/renderer.py +++ /dev/null @@ -1,319 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from powerline.theme import Theme -from unicodedata import east_asian_width, combining -import os - - -try: - NBSP = unicode(' ', 'utf-8') -except NameError: - NBSP = ' ' - - -def construct_returned_value(rendered_highlighted, segments, output_raw): - if output_raw: - return rendered_highlighted, ''.join((segment['_rendered_raw'] for segment in segments)) - else: - return rendered_highlighted - - -class Renderer(object): - '''Object that is responsible for generating the highlighted string. - - :param dict theme_config: - Main theme configuration. - :param local_themes: - Local themes. Is to be used by subclasses from ``.get_theme()`` method, - base class only records this parameter to a ``.local_themes`` attribute. - :param dict theme_kwargs: - Keyword arguments for ``Theme`` class constructor. - :param Colorscheme colorscheme: - Colorscheme object that holds colors configuration. - :param PowerlineLogger pl: - Object used for logging. - :param int ambiwidth: - Width of the characters with east asian width unicode attribute equal to - ``A`` (Ambigious). - :param dict options: - Various options. 
Are normally not used by base renderer, but all options - are recorded as attributes. - ''' - - segment_info = { - 'environ': os.environ, - 'getcwd': getattr(os, 'getcwdu', os.getcwd), - 'home': os.environ.get('HOME'), - } - '''Basic segment info. Is merged with local segment information by - ``.get_segment_info()`` method. Keys: - - ``environ`` - Object containing environment variables. Must define at least the - following methods: ``.__getitem__(var)`` that raises ``KeyError`` in - case requested environment variable is not present, ``.get(var, - default=None)`` that works like ``dict.get`` and be able to be passed to - ``Popen``. - - ``getcwd`` - Function that returns current working directory. Will be called without - any arguments, should return ``unicode`` or (in python-2) regular - string. - - ``home`` - String containing path to home directory. Should be ``unicode`` or (in - python-2) regular string or ``None``. - ''' - - def __init__(self, - theme_config, - local_themes, - theme_kwargs, - colorscheme, - pl, - ambiwidth=1, - **options): - self.__dict__.update(options) - self.theme_config = theme_config - theme_kwargs['pl'] = pl - self.pl = pl - self.theme = Theme(theme_config=theme_config, **theme_kwargs) - self.local_themes = local_themes - self.theme_kwargs = theme_kwargs - self.colorscheme = colorscheme - self.width_data = { - 'N': 1, # Neutral - 'Na': 1, # Narrow - 'A': ambiwidth, # Ambigious - 'H': 1, # Half-width - 'W': 2, # Wide - 'F': 2, # Fullwidth - } - - def strwidth(self, string): - '''Function that returns string width. - - Is used to calculate the place given string occupies when handling - ``width`` argument to ``.render()`` method. Must take east asian width - into account. - - :param unicode string: - String whose width will be calculated. - - :return: unsigned integer. - ''' - return sum((0 if combining(symbol) else self.width_data[east_asian_width(symbol)] for symbol in string)) - - def get_theme(self, matcher_info): - '''Get Theme object. - - Is to be overridden by subclasses to support local themes, this variant - only returns ``.theme`` attribute. - - :param matcher_info: - Parameter ``matcher_info`` that ``.render()`` method received. - Unused. - ''' - return self.theme - - def shutdown(self): - '''Prepare for interpreter shutdown. The only job it is supposed to do - is calling ``.shutdown()`` method for all theme objects. Should be - overridden by subclasses in case they support local themes. - ''' - self.theme.shutdown() - - def _get_highlighting(self, segment, mode): - segment['highlight'] = self.colorscheme.get_highlighting(segment['highlight_group'], mode, segment.get('gradient_level')) - if segment['divider_highlight_group']: - segment['divider_highlight'] = self.colorscheme.get_highlighting(segment['divider_highlight_group'], mode) - else: - segment['divider_highlight'] = None - return segment - - def get_segment_info(self, segment_info): - '''Get segment information. - - Must return a dictionary containing at least ``home``, ``environ`` and - ``getcwd`` keys (see documentation for ``segment_info`` attribute). This - implementation merges ``segment_info`` dictionary passed to - ``.render()`` method with ``.segment_info`` attribute, preferring keys - from the former. It also replaces ``getcwd`` key with function returning - ``segment_info['environ']['PWD']`` in case ``PWD`` variable is - available. - - :param dict segment_info: - Segment information that was passed to ``.render()`` method. - - :return: dict with segment information. 
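The strwidth() method above weights each character by its Unicode east-asian-width class and skips combining marks. A standalone equivalent with the same width table is sketched below; display_width is an invented name.

from unicodedata import east_asian_width, combining


def display_width(string, ambiwidth=1):
    widths = {'N': 1, 'Na': 1, 'A': ambiwidth, 'H': 1, 'W': 2, 'F': 2}
    return sum(0 if combining(ch) else widths[east_asian_width(ch)]
               for ch in string)


if __name__ == '__main__':
    print(display_width('status'))      # 6
    print(display_width('日本語'))       # 6: wide characters count double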
- ''' - r = self.segment_info.copy() - if segment_info: - r.update(segment_info) - if 'PWD' in r['environ']: - r['getcwd'] = lambda: r['environ']['PWD'] - return r - - def render(self, mode=None, width=None, side=None, output_raw=False, segment_info=None, matcher_info=None): - '''Render all segments. - - When a width is provided, low-priority segments are dropped one at - a time until the line is shorter than the width, or only segments - with a negative priority are left. If one or more filler segments are - provided they will fill the remaining space until the desired width is - reached. - - :param str mode: - Mode string. Affects contents (colors and the set of segments) of - rendered string. - :param int width: - Maximum width text can occupy. May be exceeded if there are too much - non-removable segments. - :param str side: - One of ``left``, ``right``. Determines which side will be rendered. - If not present all sides are rendered. - :param bool output_raw: - Changes the output: if this parameter is ``True`` then in place of - one string this method outputs a pair ``(colored_string, - colorless_string)``. - :param dict segment_info: - Segment information. See also ``.get_segment_info()`` method. - :param matcher_info: - Matcher information. Is processed in ``.get_theme()`` method. - ''' - theme = self.get_theme(matcher_info) - segments = theme.get_segments(side, self.get_segment_info(segment_info)) - - # Handle excluded/included segments for the current mode - segments = [self._get_highlighting(segment, mode) for segment in segments - if mode not in segment['exclude_modes'] or (segment['include_modes'] and segment in segment['include_modes'])] - - segments = [segment for segment in self._render_segments(theme, segments)] - - if not width: - # No width specified, so we don't need to crop or pad anything - return construct_returned_value(''.join([segment['_rendered_hl'] for segment in segments]) + self.hlstyle(), segments, output_raw) - - # Create an ordered list of segments that can be dropped - segments_priority = sorted((segment for segment in segments if segment['priority'] is not None), key=lambda segment: segment['priority'], reverse=True) - while sum([segment['_len'] for segment in segments]) > width and len(segments_priority): - segments.remove(segments_priority[0]) - segments_priority.pop(0) - - # Distribute the remaining space on spacer segments - segments_spacers = [segment for segment in segments if segment['width'] == 'auto'] - if segments_spacers: - distribute_len, distribute_len_remainder = divmod(width - sum([segment['_len'] for segment in segments]), len(segments_spacers)) - for segment in segments_spacers: - if segment['align'] == 'l': - segment['_space_right'] += distribute_len - elif segment['align'] == 'r': - segment['_space_left'] += distribute_len - elif segment['align'] == 'c': - space_side, space_side_remainder = divmod(distribute_len, 2) - segment['_space_left'] += space_side + space_side_remainder - segment['_space_right'] += space_side - segments_spacers[0]['_space_right'] += distribute_len_remainder - - rendered_highlighted = ''.join([segment['_rendered_hl'] for segment in self._render_segments(theme, segments)]) + self.hlstyle() - - return construct_returned_value(rendered_highlighted, segments, output_raw) - - def _render_segments(self, theme, segments, render_highlighted=True): - '''Internal segment rendering method. 
- - This method loops through the segment array and compares the - foreground/background colors and divider properties and returns the - rendered statusline as a string. - - The method always renders the raw segment contents (i.e. without - highlighting strings added), and only renders the highlighted - statusline if render_highlighted is True. - ''' - segments_len = len(segments) - - for index, segment in enumerate(segments): - segment['_rendered_raw'] = '' - segment['_rendered_hl'] = '' - - prev_segment = segments[index - 1] if index > 0 else theme.EMPTY_SEGMENT - next_segment = segments[index + 1] if index < segments_len - 1 else theme.EMPTY_SEGMENT - compare_segment = next_segment if segment['side'] == 'left' else prev_segment - outer_padding = ' ' if (index == 0 and segment['side'] == 'left') or (index == segments_len - 1 and segment['side'] == 'right') else '' - divider_type = 'soft' if compare_segment['highlight']['bg'] == segment['highlight']['bg'] else 'hard' - - divider_raw = theme.get_divider(segment['side'], divider_type) - divider_spaces = theme.get_spaces() - divider_highlighted = '' - contents_raw = segment['contents'] - contents_highlighted = '' - draw_divider = segment['draw_' + divider_type + '_divider'] - - # Pad segments first - if draw_divider: - if segment['side'] == 'left': - contents_raw = outer_padding + (segment['_space_left'] * ' ') + contents_raw + ((divider_spaces + segment['_space_right']) * ' ') - else: - contents_raw = ((divider_spaces + segment['_space_left']) * ' ') + contents_raw + (segment['_space_right'] * ' ') + outer_padding - else: - if segment['side'] == 'left': - contents_raw = outer_padding + (segment['_space_left'] * ' ') + contents_raw + (segment['_space_right'] * ' ') - else: - contents_raw = (segment['_space_left'] * ' ') + contents_raw + (segment['_space_right'] * ' ') + outer_padding - - # Replace spaces with no-break spaces - contents_raw = contents_raw.replace(' ', NBSP) - divider_raw = divider_raw.replace(' ', NBSP) - - # Apply highlighting to padded dividers and contents - if render_highlighted: - if divider_type == 'soft': - divider_highlight_group_key = 'highlight' if segment['divider_highlight_group'] is None else 'divider_highlight' - divider_fg = segment[divider_highlight_group_key]['fg'] - divider_bg = segment[divider_highlight_group_key]['bg'] - else: - divider_fg = segment['highlight']['bg'] - divider_bg = compare_segment['highlight']['bg'] - divider_highlighted = self.hl(divider_raw, divider_fg, divider_bg, False) - contents_highlighted = self.hl(self.escape(contents_raw), **segment['highlight']) - - # Append padded raw and highlighted segments to the rendered segment variables - if draw_divider: - if segment['side'] == 'left': - segment['_rendered_raw'] += contents_raw + divider_raw - segment['_rendered_hl'] += contents_highlighted + divider_highlighted - else: - segment['_rendered_raw'] += divider_raw + contents_raw - segment['_rendered_hl'] += divider_highlighted + contents_highlighted - else: - if segment['side'] == 'left': - segment['_rendered_raw'] += contents_raw - segment['_rendered_hl'] += contents_highlighted - else: - segment['_rendered_raw'] += contents_raw - segment['_rendered_hl'] += contents_highlighted - segment['_len'] = self.strwidth(segment['_rendered_raw']) - yield segment - - @staticmethod - def escape(string): - '''Method that escapes segment contents. - ''' - return string - - def hlstyle(fg=None, bg=None, attr=None): - '''Output highlight style string. 
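For the width handling in render() above, the order of operations is: drop the removable segments with the highest priority numbers until the line fits, then hand any leftover columns to the segments whose width is 'auto'. Below is a simplified sketch with plain dicts, no alignment and no highlighting; fit_segments is an invented name.

def fit_segments(segments, width):
    # segments: dicts with 'text', 'priority' (None means never drop) and
    # optionally 'width' == 'auto' for spacers that absorb leftover space.
    segs = list(segments)
    droppable = sorted((s for s in segs if s['priority'] is not None),
                       key=lambda s: s['priority'], reverse=True)
    while sum(len(s['text']) for s in segs) > width and droppable:
        segs.remove(droppable.pop(0))
    spacers = [s for s in segs if s.get('width') == 'auto']
    if spacers:
        free = max(width - sum(len(s['text']) for s in segs), 0)
        pad, rem = divmod(free, len(spacers))
        for s in spacers:
            s['text'] += ' ' * pad            # pads the dicts in place
        spacers[0]['text'] += ' ' * rem
    return ''.join(s['text'] for s in segs)


if __name__ == '__main__':
    print(repr(fit_segments([
        {'text': 'mode', 'priority': None, 'width': 'auto'},
        {'text': ' branch', 'priority': 10},
        {'text': ' /very/long/path', 'priority': 20},
    ], width=20)))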
- - Assuming highlighted string looks like ``{style}{contents}`` this method - should output ``{style}``. If it is called without arguments this method - is supposed to reset style to its default. - ''' - raise NotImplementedError - - def hl(self, contents, fg=None, bg=None, attr=None): - '''Output highlighted chunk. - - This implementation just outputs ``.hlstyle()`` joined with - ``contents``. - ''' - return self.hlstyle(fg, bg, attr) + (contents or '') diff --git a/common/.local/lib/python2.7/site-packages/powerline/renderers/__init__.py b/common/.local/lib/python2.7/site-packages/powerline/renderers/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/common/.local/lib/python2.7/site-packages/powerline/renderers/bash_prompt.py b/common/.local/lib/python2.7/site-packages/powerline/renderers/bash_prompt.py deleted file mode 100644 index 01f42f7..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/renderers/bash_prompt.py +++ /dev/null @@ -1,16 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from powerline.renderers.shell import ShellRenderer - - -class BashPromptRenderer(ShellRenderer): - '''Powerline bash prompt segment renderer.''' - escape_hl_start = '\[' - escape_hl_end = '\]' - - @staticmethod - def escape(string): - return string.replace('\\', '\\\\').replace('$', '\\$').replace('`', '\\`') - - -renderer = BashPromptRenderer diff --git a/common/.local/lib/python2.7/site-packages/powerline/renderers/ipython.py b/common/.local/lib/python2.7/site-packages/powerline/renderers/ipython.py deleted file mode 100644 index f5c2b24..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/renderers/ipython.py +++ /dev/null @@ -1,35 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from powerline.renderers.shell import ShellRenderer -from powerline.theme import Theme - - -class IpythonRenderer(ShellRenderer): - '''Powerline ipython segment renderer.''' - escape_hl_start = '\x01' - escape_hl_end = '\x02' - - def get_segment_info(self, segment_info): - r = self.segment_info.copy() - r['ipython'] = segment_info - return r - - def get_theme(self, matcher_info): - if matcher_info == 'in': - return self.theme - else: - match = self.local_themes[matcher_info] - try: - return match['theme'] - except KeyError: - match['theme'] = Theme(theme_config=match['config'], top_theme_config=self.theme_config, **self.theme_kwargs) - return match['theme'] - - def shutdown(self): - self.theme.shutdown() - for match in self.local_themes.values(): - if 'theme' in match: - match['theme'].shutdown() - - -renderer = IpythonRenderer diff --git a/common/.local/lib/python2.7/site-packages/powerline/renderers/pango_markup.py b/common/.local/lib/python2.7/site-packages/powerline/renderers/pango_markup.py deleted file mode 100644 index ea1fe15..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/renderers/pango_markup.py +++ /dev/null @@ -1,38 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from powerline.renderer import Renderer -from powerline.colorscheme import ATTR_BOLD, ATTR_ITALIC, ATTR_UNDERLINE - -from xml.sax.saxutils import escape as _escape - - -class PangoMarkupRenderer(Renderer): - '''Powerline Pango markup segment renderer.''' - - @staticmethod - def hlstyle(*args, **kwargs): - # We don't need to explicitly reset attributes, so skip those calls - return '' - - def hl(self, contents, fg=None, bg=None, attr=None): - '''Highlight a segment.''' - awesome_attr = [] - if fg is not None: - if fg is not False and fg[1] is not False: - awesome_attr += 
['foreground="#{0:06x}"'.format(fg[1])] - if bg is not None: - if bg is not False and bg[1] is not False: - awesome_attr += ['background="#{0:06x}"'.format(bg[1])] - if attr is not None and attr is not False: - if attr & ATTR_BOLD: - awesome_attr += ['font_weight="bold"'] - if attr & ATTR_ITALIC: - awesome_attr += ['font_style="italic"'] - if attr & ATTR_UNDERLINE: - awesome_attr += ['underline="single"'] - return '' + contents + '' - - escape = staticmethod(_escape) - - -renderer = PangoMarkupRenderer diff --git a/common/.local/lib/python2.7/site-packages/powerline/renderers/shell.py b/common/.local/lib/python2.7/site-packages/powerline/renderers/shell.py deleted file mode 100644 index 945dc76..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/renderers/shell.py +++ /dev/null @@ -1,70 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from powerline.renderer import Renderer -from powerline.colorscheme import ATTR_BOLD, ATTR_ITALIC, ATTR_UNDERLINE - - -def int_to_rgb(num): - r = (num >> 16) & 0xff - g = (num >> 8) & 0xff - b = num & 0xff - return r, g, b - - -class ShellRenderer(Renderer): - '''Powerline shell segment renderer.''' - escape_hl_start = '' - escape_hl_end = '' - term_truecolor = False - tmux_escape = False - screen_escape = False - - def hlstyle(self, fg=None, bg=None, attr=None): - '''Highlight a segment. - - If an argument is None, the argument is ignored. If an argument is - False, the argument is reset to the terminal defaults. If an argument - is a valid color or attribute, it's added to the ANSI escape code. - ''' - ansi = [0] - if fg is not None: - if fg is False or fg[0] is False: - ansi += [39] - else: - if self.term_truecolor: - ansi += [38, 2] + list(int_to_rgb(fg[1])) - else: - ansi += [38, 5, fg[0]] - if bg is not None: - if bg is False or bg[0] is False: - ansi += [49] - else: - if self.term_truecolor: - ansi += [48, 2] + list(int_to_rgb(bg[1])) - else: - ansi += [48, 5, bg[0]] - if attr is not None: - if attr is False: - ansi += [22] - else: - if attr & ATTR_BOLD: - ansi += [1] - elif attr & ATTR_ITALIC: - # Note: is likely not to work or even be inverse in place of - # italic. Omit using this in colorschemes. 
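In the 256-colour branch above the escape sequence is assembled from a list of integer codes that always starts with 0 (reset) and then appends 38;5;&lt;index&gt; and 48;5;&lt;index&gt; for foreground and background; for the plain ShellRenderer the escape_hl_start/escape_hl_end wrappers are empty, so only the raw escape remains. A small sketch that rebuilds just that string for a made-up colour pair (fg and bg are the same (cterm, truecolor) tuples the renderer receives):

def ansi_256(fg=None, bg=None):
    # Condensed 256-colour path of ShellRenderer.hlstyle(): fg and bg are
    # (cterm_index, truecolor_value) pairs or None, and the code list always
    # starts with 0 so previous attributes are reset first.
    codes = [0]
    if fg is not None:
        codes += [38, 5, fg[0]]
    if bg is not None:
        codes += [48, 5, bg[0]]
    return '\033[{0}m'.format(';'.join(str(code) for code in codes))


print(repr(ansi_256(fg=(250, None), bg=(237, None))))
# '\x1b[0;38;5;250;48;5;237m'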
- ansi += [3] - elif attr & ATTR_UNDERLINE: - ansi += [4] - r = '\033[{0}m'.format(';'.join(str(attr) for attr in ansi)) - if self.tmux_escape: - r = '\033Ptmux;' + r.replace('\033', '\033\033') + '\033\\' - elif self.screen_escape: - r = '\033P' + r.replace('\033', '\033\033') + '\033\\' - return self.escape_hl_start + r + self.escape_hl_end - - @staticmethod - def escape(string): - return string.replace('\\', '\\\\') - - -renderer = ShellRenderer diff --git a/common/.local/lib/python2.7/site-packages/powerline/renderers/tmux.py b/common/.local/lib/python2.7/site-packages/powerline/renderers/tmux.py deleted file mode 100644 index 34e4329..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/renderers/tmux.py +++ /dev/null @@ -1,44 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from powerline.renderer import Renderer -from powerline.colorscheme import ATTR_BOLD, ATTR_ITALIC, ATTR_UNDERLINE - - -class TmuxRenderer(Renderer): - '''Powerline tmux segment renderer.''' - def hlstyle(self, fg=None, bg=None, attr=None): - '''Highlight a segment.''' - # We don't need to explicitly reset attributes, so skip those calls - if not attr and not bg and not fg: - return '' - tmux_attr = [] - if fg is not None: - if fg is False or fg[0] is False: - tmux_attr += ['fg=default'] - else: - tmux_attr += ['fg=colour' + str(fg[0])] - if bg is not None: - if bg is False or bg[0] is False: - tmux_attr += ['bg=default'] - else: - tmux_attr += ['bg=colour' + str(bg[0])] - if attr is not None: - if attr is False: - tmux_attr += ['nobold', 'noitalics', 'nounderscore'] - else: - if attr & ATTR_BOLD: - tmux_attr += ['bold'] - else: - tmux_attr += ['nobold'] - if attr & ATTR_ITALIC: - tmux_attr += ['italics'] - else: - tmux_attr += ['noitalics'] - if attr & ATTR_UNDERLINE: - tmux_attr += ['underscore'] - else: - tmux_attr += ['nounderscore'] - return '#[' + ','.join(tmux_attr) + ']' - - -renderer = TmuxRenderer diff --git a/common/.local/lib/python2.7/site-packages/powerline/renderers/vim.py b/common/.local/lib/python2.7/site-packages/powerline/renderers/vim.py deleted file mode 100644 index 43d1d07..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/renderers/vim.py +++ /dev/null @@ -1,154 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from __future__ import absolute_import - -from powerline.bindings.vim import vim_get_func -from powerline.renderer import Renderer -from powerline.colorscheme import ATTR_BOLD, ATTR_ITALIC, ATTR_UNDERLINE -from powerline.theme import Theme - -import vim -import sys - - -vim_mode = vim_get_func('mode', rettype=str) -mode_translations = { - chr(ord('V') - 0x40): '^V', - chr(ord('S') - 0x40): '^S', -} - - -class VimRenderer(Renderer): - '''Powerline vim segment renderer.''' - - def __init__(self, *args, **kwargs): - if not hasattr(vim, 'strwidth'): - # Hope nobody want to change this at runtime - if vim.eval('&ambiwidth') == 'double': - kwargs = dict(**kwargs) - kwargs['ambigious'] = 2 - super(VimRenderer, self).__init__(*args, **kwargs) - self.hl_groups = {} - - def shutdown(self): - self.theme.shutdown() - for match in self.local_themes.values(): - if 'theme' in match: - match['theme'].shutdown() - - def add_local_theme(self, matcher, theme): - if matcher in self.local_themes: - raise KeyError('There is already a local theme with given matcher') - self.local_themes[matcher] = theme - - def get_theme(self, matcher_info): - for matcher in self.local_themes.keys(): - if matcher(matcher_info): - match = self.local_themes[matcher] - try: - return match['theme'] - except 
KeyError: - match['theme'] = Theme(theme_config=match['config'], top_theme_config=self.theme_config, **self.theme_kwargs) - return match['theme'] - else: - return self.theme - - if hasattr(vim, 'strwidth'): - if sys.version_info < (3,): - @staticmethod - def strwidth(string): - # Does not work with tabs, but neither is strwidth from default - # renderer - return vim.strwidth(string.encode('utf-8')) - else: - @staticmethod # NOQA - def strwidth(string): - return vim.strwidth(string) - - def get_segment_info(self, segment_info): - return segment_info or self.segment_info - - def render(self, window_id, winidx, current): - '''Render all segments.''' - if current: - mode = vim_mode(1) - mode = mode_translations.get(mode, mode) - else: - mode = 'nc' - segment_info = { - 'window': vim.windows[winidx], - 'mode': mode, - 'window_id': window_id, - } - segment_info['buffer'] = segment_info['window'].buffer - segment_info['bufnr'] = segment_info['buffer'].number - segment_info.update(self.segment_info) - winwidth = segment_info['window'].width - statusline = super(VimRenderer, self).render( - mode=mode, - width=winwidth, - segment_info=segment_info, - matcher_info=segment_info, - ) - return statusline - - def reset_highlight(self): - self.hl_groups.clear() - - @staticmethod - def escape(string): - return string.replace('%', '%%') - - def hlstyle(self, fg=None, bg=None, attr=None): - '''Highlight a segment. - - If an argument is None, the argument is ignored. If an argument is - False, the argument is reset to the terminal defaults. If an argument - is a valid color or attribute, it's added to the vim highlight group. - ''' - # We don't need to explicitly reset attributes in vim, so skip those calls - if not attr and not bg and not fg: - return '' - - if not (fg, bg, attr) in self.hl_groups: - hl_group = { - 'ctermfg': 'NONE', - 'guifg': None, - 'ctermbg': 'NONE', - 'guibg': None, - 'attr': ['NONE'], - 'name': '', - } - if fg is not None and fg is not False: - hl_group['ctermfg'] = fg[0] - hl_group['guifg'] = fg[1] - if bg is not None and bg is not False: - hl_group['ctermbg'] = bg[0] - hl_group['guibg'] = bg[1] - if attr: - hl_group['attr'] = [] - if attr & ATTR_BOLD: - hl_group['attr'].append('bold') - if attr & ATTR_ITALIC: - hl_group['attr'].append('italic') - if attr & ATTR_UNDERLINE: - hl_group['attr'].append('underline') - hl_group['name'] = 'Pl_' + \ - str(hl_group['ctermfg']) + '_' + \ - str(hl_group['guifg']) + '_' + \ - str(hl_group['ctermbg']) + '_' + \ - str(hl_group['guibg']) + '_' + \ - ''.join(hl_group['attr']) - self.hl_groups[(fg, bg, attr)] = hl_group - vim.command('hi {group} ctermfg={ctermfg} guifg={guifg} guibg={guibg} ctermbg={ctermbg} cterm={attr} gui={attr}'.format( - group=hl_group['name'], - ctermfg=hl_group['ctermfg'], - guifg='#{0:06x}'.format(hl_group['guifg']) if hl_group['guifg'] is not None else 'NONE', - ctermbg=hl_group['ctermbg'], - guibg='#{0:06x}'.format(hl_group['guibg']) if hl_group['guibg'] is not None else 'NONE', - attr=','.join(hl_group['attr']), - )) - return '%#' + self.hl_groups[(fg, bg, attr)]['name'] + '#' - - -renderer = VimRenderer diff --git a/common/.local/lib/python2.7/site-packages/powerline/renderers/zsh_prompt.py b/common/.local/lib/python2.7/site-packages/powerline/renderers/zsh_prompt.py deleted file mode 100644 index 2e6e5e7..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/renderers/zsh_prompt.py +++ /dev/null @@ -1,16 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from powerline.renderers.shell import ShellRenderer - - -class 
ZshPromptRenderer(ShellRenderer): - '''Powerline zsh prompt segment renderer.''' - escape_hl_start = '%{' - escape_hl_end = '%}' - - @staticmethod - def escape(string): - return string.replace('%', '%%').replace('\\', '\\\\') - - -renderer = ZshPromptRenderer diff --git a/common/.local/lib/python2.7/site-packages/powerline/segment.py b/common/.local/lib/python2.7/site-packages/powerline/segment.py deleted file mode 100644 index c96f1c0..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/segment.py +++ /dev/null @@ -1,104 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from __future__ import absolute_import -import sys - - -def get_segment_key(segment, theme_configs, key, module=None, default=None): - try: - return segment[key] - except KeyError: - if 'name' in segment: - name = segment['name'] - for theme_config in theme_configs: - if 'segment_data' in theme_config: - for segment_key in ((module + '.' + name, name) if module else (name,)): - try: - return theme_config['segment_data'][segment_key][key] - except KeyError: - pass - return default - - -def get_function(data, segment): - oldpath = sys.path - sys.path = data['path'] + sys.path - segment_module = str(segment.get('module', data['default_module'])) - try: - return None, getattr(__import__(segment_module, fromlist=[segment['name']]), segment['name']), segment_module - finally: - sys.path = oldpath - - -def get_string(data, segment): - return data['get_key'](segment, None, 'contents'), None, None - - -def get_filler(data, segment): - return None, None, None - - -segment_getters = { - "function": get_function, - "string": get_string, - "filler": get_filler, -} - - -def gen_segment_getter(pl, ext, path, theme_configs, default_module=None): - data = { - 'default_module': default_module or 'powerline.segments.' + ext, - 'path': path, - } - - def get_key(segment, module, key, default=None): - return get_segment_key(segment, theme_configs, key, module, default) - data['get_key'] = get_key - - def get(segment, side): - segment_type = segment.get('type', 'function') - try: - get_segment_info = segment_getters[segment_type] - except KeyError: - raise TypeError('Unknown segment type: {0}'.format(segment_type)) - - try: - contents, contents_func, module = get_segment_info(data, segment) - except Exception as e: - pl.exception('Failed to generate segment from {0!r}: {1}', segment, str(e), prefix='segment_generator') - return None - - if segment_type == 'function': - highlight_group = [module + '.' 
+ segment['name'], segment['name']] - else: - highlight_group = segment.get('highlight_group') or segment.get('name') - - return { - 'name': segment.get('name'), - 'type': segment_type, - 'highlight_group': highlight_group, - 'divider_highlight_group': None, - 'before': get_key(segment, module, 'before', ''), - 'after': get_key(segment, module, 'after', ''), - 'contents_func': contents_func, - 'contents': contents, - 'args': get_key(segment, module, 'args', {}) if segment_type == 'function' else {}, - 'priority': segment.get('priority', None), - 'draw_hard_divider': segment.get('draw_hard_divider', True), - 'draw_soft_divider': segment.get('draw_soft_divider', True), - 'draw_inner_divider': segment.get('draw_inner_divider', False), - 'side': side, - 'exclude_modes': segment.get('exclude_modes', []), - 'include_modes': segment.get('include_modes', []), - 'width': segment.get('width'), - 'align': segment.get('align', 'l'), - 'shutdown': getattr(contents_func, 'shutdown', None), - 'startup': getattr(contents_func, 'startup', None), - '_rendered_raw': '', - '_rendered_hl': '', - '_len': 0, - '_space_left': 0, - '_space_right': 0, - } - - return get diff --git a/common/.local/lib/python2.7/site-packages/powerline/segments/__init__.py b/common/.local/lib/python2.7/site-packages/powerline/segments/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/common/.local/lib/python2.7/site-packages/powerline/segments/common.py b/common/.local/lib/python2.7/site-packages/powerline/segments/common.py deleted file mode 100644 index f7ccf1d..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/segments/common.py +++ /dev/null @@ -1,1063 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from __future__ import absolute_import - -import os -import sys - -from datetime import datetime -import socket -from multiprocessing import cpu_count as _cpu_count - -from powerline.lib import add_divider_highlight_group -from powerline.lib.url import urllib_read, urllib_urlencode -from powerline.lib.vcs import guess -from powerline.lib.threaded import ThreadedSegment, KwThreadedSegment, with_docstring -from powerline.lib.monotonic import monotonic -from powerline.lib.humanize_bytes import humanize_bytes -from powerline.theme import requires_segment_info -from collections import namedtuple - - -cpu_count = None - - -@requires_segment_info -def hostname(pl, segment_info, only_if_ssh=False, exclude_domain=False): - '''Return the current hostname. - - :param bool only_if_ssh: - only return the hostname if currently in an SSH session - :param bool exclude_domain: - return the hostname without domain if there is one - ''' - if only_if_ssh and not segment_info['environ'].get('SSH_CLIENT'): - return None - if exclude_domain: - return socket.gethostname().split('.')[0] - return socket.gethostname() - - -@requires_segment_info -class RepositorySegment(KwThreadedSegment): - def __init__(self): - super(RepositorySegment, self).__init__() - self.directories = {} - - @staticmethod - def key(segment_info, **kwargs): - return os.path.abspath(segment_info['getcwd']()) - - def update(self, *args): - # .compute_state() is running only in this method, and only in one - # thread, thus operations with .directories do not need write locks - # (.render() method is not using .directories). 
If this is changed - # .directories needs redesigning - self.directories.clear() - return super(RepositorySegment, self).update(*args) - - def compute_state(self, path): - repo = guess(path=path) - if repo: - if repo.directory in self.directories: - return self.directories[repo.directory] - else: - r = self.process_repo(repo) - self.directories[repo.directory] = r - return r - - -class RepositoryStatusSegment(RepositorySegment): - interval = 2 - - @staticmethod - def process_repo(repo): - return repo.status() - - -repository_status = with_docstring(RepositoryStatusSegment(), -'''Return the status for the current VCS repository.''') - - -class BranchSegment(RepositorySegment): - interval = 0.2 - started_repository_status = False - - @staticmethod - def process_repo(repo): - return repo.branch() - - @staticmethod - def render_one(branch, status_colors=False, **kwargs): - if branch and status_colors: - return [{ - 'contents': branch, - 'highlight_group': ['branch_dirty' if repository_status(**kwargs) else 'branch_clean', 'branch'], - }] - else: - return branch - - def startup(self, status_colors=False, **kwargs): - super(BranchSegment, self).startup(**kwargs) - if status_colors: - self.started_repository_status = True - repository_status.startup(**kwargs) - - def shutdown(self): - if self.started_repository_status: - repository_status.shutdown() - super(BranchSegment, self).shutdown() - - -branch = with_docstring(BranchSegment(), -'''Return the current VCS branch. - -:param bool status_colors: - determines whether repository status will be used to determine highlighting. Default: True. - -Highlight groups used: ``branch_clean``, ``branch_dirty``, ``branch``. -''') - - -@requires_segment_info -def cwd(pl, segment_info, dir_shorten_len=None, dir_limit_depth=None, use_path_separator=False): - '''Return the current working directory. - - Returns a segment list to create a breadcrumb-like effect. - - :param int dir_shorten_len: - shorten parent directory names to this length (e.g. :file:`/long/path/to/powerline` → :file:`/l/p/t/powerline`) - :param int dir_limit_depth: - limit directory depth to this number (e.g. :file:`/long/path/to/powerline` → :file:`⋯/to/powerline`) - :param bool use_path_separator: - Use path separator in place of soft divider. - - Divider highlight group used: ``cwd:divider``. - - Highlight groups used: ``cwd:current_folder`` or ``cwd``. It is recommended to define all highlight groups. 
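Both shortening options documented above come down to simple list surgery on the split path: dir_limit_depth keeps only the last few components and marks the cut with an ellipsis, while dir_shorten_len clips every component except the last. A standalone sketch of the same shortening, detached from segment_info and the home-directory handling:

def shorten_cwd(path, dir_shorten_len=None, dir_limit_depth=None, sep='/'):
    parts = path.split(sep)
    # Keep only the last dir_limit_depth components and mark the cut.
    if dir_limit_depth and len(parts) > dir_limit_depth + 1:
        parts = ['⋯'] + parts[-dir_limit_depth:]
    # Clip every component except the final one to dir_shorten_len characters.
    if dir_shorten_len:
        parts = [p[:dir_shorten_len] if p else p for p in parts[:-1]] + [parts[-1]]
    return sep.join(parts)


print(shorten_cwd('/long/path/to/powerline', dir_shorten_len=1))
# /l/p/t/powerline
print(shorten_cwd('/long/path/to/powerline', dir_limit_depth=2))
# ⋯/to/powerline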
- ''' - import re - try: - cwd = segment_info['getcwd']() - except OSError as e: - if e.errno == 2: - # user most probably deleted the directory - # this happens when removing files from Mercurial repos for example - pl.warn('Current directory not found') - cwd = "[not found]" - else: - raise - home = segment_info['home'] - if home: - cwd = re.sub('^' + re.escape(home), '~', cwd, 1) - cwd_split = cwd.split(os.sep) - cwd_split_len = len(cwd_split) - if dir_limit_depth and cwd_split_len > dir_limit_depth + 1: - del(cwd_split[0:-dir_limit_depth]) - cwd_split.insert(0, '⋯') - cwd = [i[0:dir_shorten_len] if dir_shorten_len and i else i for i in cwd_split[:-1]] + [cwd_split[-1]] - ret = [] - if not cwd[0]: - cwd[0] = '/' - draw_inner_divider = not use_path_separator - for part in cwd: - if not part: - continue - if use_path_separator: - part += os.sep - ret.append({ - 'contents': part, - 'divider_highlight_group': 'cwd:divider', - 'draw_inner_divider': draw_inner_divider, - }) - ret[-1]['highlight_group'] = ['cwd:current_folder', 'cwd'] - if use_path_separator: - ret[-1]['contents'] = ret[-1]['contents'][:-1] - return ret - - -def date(pl, format='%Y-%m-%d', istime=False): - '''Return the current date. - - :param str format: - strftime-style date format string - - Divider highlight group used: ``time:divider``. - - Highlight groups used: ``time`` or ``date``. - ''' - return [{ - 'contents': datetime.now().strftime(format), - 'highlight_group': (['time'] if istime else []) + ['date'], - 'divider_highlight_group': 'time:divider' if istime else None, - }] - - -def fuzzy_time(pl): - '''Display the current time as fuzzy time, e.g. "quarter past six".''' - hour_str = ['twelve', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten', 'eleven'] - minute_str = { - 5: 'five past', - 10: 'ten past', - 15: 'quarter past', - 20: 'twenty past', - 25: 'twenty-five past', - 30: 'half past', - 35: 'twenty-five to', - 40: 'twenty to', - 45: 'quarter to', - 50: 'ten to', - 55: 'five to', - } - special_case_str = { - (23, 58): 'round about midnight', - (23, 59): 'round about midnight', - (0, 0): 'midnight', - (0, 1): 'round about midnight', - (0, 2): 'round about midnight', - (12, 0): 'noon', - } - - now = datetime.now() - - try: - return special_case_str[(now.hour, now.minute)] - except KeyError: - pass - - hour = now.hour - if now.minute > 32: - if hour == 23: - hour = 0 - else: - hour += 1 - if hour > 11: - hour = hour - 12 - hour = hour_str[hour] - - minute = int(round(now.minute / 5.0) * 5) - if minute == 60 or minute == 0: - return ' '.join([hour, 'o\'clock']) - else: - minute = minute_str[minute] - return ' '.join([minute, hour]) - - -def _external_ip(query_url='http://ipv4.icanhazip.com/'): - return urllib_read(query_url).strip() - - -class ExternalIpSegment(ThreadedSegment): - interval = 300 - - def set_state(self, query_url='http://ipv4.icanhazip.com/', **kwargs): - self.query_url = query_url - super(ExternalIpSegment, self).set_state(**kwargs) - - def update(self, old_ip): - return _external_ip(query_url=self.query_url) - - def render(self, ip, **kwargs): - if not ip: - return None - return [{'contents': ip, 'divider_highlight_group': 'background:divider'}] - - -external_ip = with_docstring(ExternalIpSegment(), -'''Return external IP address. 
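The _external_ip() helper above is a one-line wrapper around powerline's urllib_read (which is not part of this diff); a rough stand-alone equivalent using only the Python 3 standard library and the same default icanhazip URL might look like this:

from urllib.request import urlopen


def external_ip(query_url='http://ipv4.icanhazip.com/', timeout=10):
    # The service answers with the caller's address as plain text,
    # so stripping surrounding whitespace is all the parsing needed.
    with urlopen(query_url, timeout=timeout) as response:
        return response.read().decode('ascii', 'replace').strip()


if __name__ == '__main__':
    print(external_ip())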
- -Suggested URIs: - -* http://ipv4.icanhazip.com/ -* http://ipv6.icanhazip.com/ -* http://icanhazip.com/ (returns IPv6 address if available, else IPv4) - -:param str query_url: - URI to query for IP address, should return only the IP address as a text string - -Divider highlight group used: ``background:divider``. -''') - - -# Weather condition code descriptions available at -# http://developer.yahoo.com/weather/#codes -weather_conditions_codes = ( - ('tornado', 'stormy'), # 0 - ('tropical_storm', 'stormy'), # 1 - ('hurricane', 'stormy'), # 2 - ('severe_thunderstorms', 'stormy'), # 3 - ('thunderstorms', 'stormy'), # 4 - ('mixed_rain_and_snow', 'rainy' ), # 5 - ('mixed_rain_and_sleet', 'rainy' ), # 6 - ('mixed_snow_and_sleet', 'snowy' ), # 7 - ('freezing_drizzle', 'rainy' ), # 8 - ('drizzle', 'rainy' ), # 9 - ('freezing_rain', 'rainy' ), # 10 - ('showers', 'rainy' ), # 11 - ('showers', 'rainy' ), # 12 - ('snow_flurries', 'snowy' ), # 13 - ('light_snow_showers', 'snowy' ), # 14 - ('blowing_snow', 'snowy' ), # 15 - ('snow', 'snowy' ), # 16 - ('hail', 'snowy' ), # 17 - ('sleet', 'snowy' ), # 18 - ('dust', 'foggy' ), # 19 - ('fog', 'foggy' ), # 20 - ('haze', 'foggy' ), # 21 - ('smoky', 'foggy' ), # 22 - ('blustery', 'foggy' ), # 23 - ('windy', ), # 24 - ('cold', 'day' ), # 25 - ('clouds', 'cloudy'), # 26 - ('mostly_cloudy_night', 'cloudy'), # 27 - ('mostly_cloudy_day', 'cloudy'), # 28 - ('partly_cloudy_night', 'cloudy'), # 29 - ('partly_cloudy_day', 'cloudy'), # 30 - ('clear_night', 'night' ), # 31 - ('sun', 'sunny' ), # 32 - ('fair_night', 'night' ), # 33 - ('fair_day', 'day' ), # 34 - ('mixed_rain_and_hail', 'rainy' ), # 35 - ('hot', 'sunny' ), # 36 - ('isolated_thunderstorms', 'stormy'), # 37 - ('scattered_thunderstorms', 'stormy'), # 38 - ('scattered_thunderstorms', 'stormy'), # 39 - ('scattered_showers', 'rainy' ), # 40 - ('heavy_snow', 'snowy' ), # 41 - ('scattered_snow_showers', 'snowy' ), # 42 - ('heavy_snow', 'snowy' ), # 43 - ('partly_cloudy', 'cloudy'), # 44 - ('thundershowers', 'rainy' ), # 45 - ('snow_showers', 'snowy' ), # 46 - ('isolated_thundershowers', 'rainy' ), # 47 -) -# ('day', (25, 34)), -# ('rainy', (5, 6, 8, 9, 10, 11, 12, 35, 40, 45, 47)), -# ('cloudy', (26, 27, 28, 29, 30, 44)), -# ('snowy', (7, 13, 14, 15, 16, 17, 18, 41, 42, 43, 46)), -# ('stormy', (0, 1, 2, 3, 4, 37, 38, 39)), -# ('foggy', (19, 20, 21, 22, 23)), -# ('sunny', (32, 36)), -# ('night', (31, 33))): -weather_conditions_icons = { - 'day': '〇', - 'blustery': '⚑', - 'rainy': '☔', - 'cloudy': '☁', - 'snowy': '❅', - 'stormy': '☈', - 'foggy': '〰', - 'sunny': '☼', - 'night': '☾', - 'windy': '☴', - 'not_available': '�', - 'unknown': '⚠', -} - -temp_conversions = { - 'C': lambda temp: temp, - 'F': lambda temp: (temp * 9 / 5) + 32, - 'K': lambda temp: temp + 273.15, -} - -# Note: there are also unicode characters for units: ℃, ℉ and K -temp_units = { - 'C': '°C', - 'F': '°F', - 'K': 'K', -} - - -class WeatherSegment(ThreadedSegment): - interval = 600 - - def set_state(self, location_query=None, **kwargs): - self.location = location_query - self.url = None - super(WeatherSegment, self).set_state(**kwargs) - - def update(self, old_weather): - import json - - if not self.url: - # Do not lock attribute assignments in this branch: they are used - # only in .update() - if not self.location: - location_data = json.loads(urllib_read('http://freegeoip.net/json/' + _external_ip())) - self.location = ','.join([location_data['city'], - location_data['region_name'], - location_data['country_name']]) - query_data = { - 'q': - 
'use "http://github.com/yql/yql-tables/raw/master/weather/weather.bylocation.xml" as we;' - 'select * from we where location="{0}" and unit="c"'.format(self.location).encode('utf-8'), - 'format': 'json', - } - self.url = 'http://query.yahooapis.com/v1/public/yql?' + urllib_urlencode(query_data) - - raw_response = urllib_read(self.url) - if not raw_response: - self.error('Failed to get response') - return - response = json.loads(raw_response) - condition = response['query']['results']['weather']['rss']['channel']['item']['condition'] - condition_code = int(condition['code']) - temp = float(condition['temp']) - - try: - icon_names = weather_conditions_codes[condition_code] - except IndexError: - if condition_code == 3200: - icon_names = ('not_available',) - self.warn('Weather is not available for location {0}', self.location) - else: - icon_names = ('unknown',) - self.error('Unknown condition code: {0}', condition_code) - - return (temp, icon_names) - - def render(self, weather, icons=None, unit='C', temp_format=None, temp_coldest=-30, temp_hottest=40, **kwargs): - if not weather: - return None - - temp, icon_names = weather - - for icon_name in icon_names: - if icons: - if icon_name in icons: - icon = icons[icon_name] - break - else: - icon = weather_conditions_icons[icon_names[-1]] - - temp_format = temp_format or ('{temp:.0f}' + temp_units[unit]) - converted_temp = temp_conversions[unit](temp) - if temp <= temp_coldest: - gradient_level = 0 - elif temp >= temp_hottest: - gradient_level = 100 - else: - gradient_level = (temp - temp_coldest) * 100.0 / (temp_hottest - temp_coldest) - groups = ['weather_condition_' + icon_name for icon_name in icon_names] + ['weather_conditions', 'weather'] - return [ - { - 'contents': icon + ' ', - 'highlight_group': groups, - 'divider_highlight_group': 'background:divider', - }, - { - 'contents': temp_format.format(temp=converted_temp), - 'highlight_group': ['weather_temp_gradient', 'weather_temp', 'weather'], - 'divider_highlight_group': 'background:divider', - 'gradient_level': gradient_level, - }, - ] - - -weather = with_docstring(WeatherSegment(), -'''Return weather from Yahoo! Weather. - -Uses GeoIP lookup from http://freegeoip.net/ to automatically determine -your current location. This should be changed if you're in a VPN or if your -IP address is registered at another location. - -Returns a list of colorized icon and temperature segments depending on -weather conditions. - -:param str unit: - temperature unit, can be one of ``F``, ``C`` or ``K`` -:param str location_query: - location query for your current location, e.g. ``oslo, norway`` -:param dict icons: - dict for overriding default icons, e.g. ``{'heavy_snow' : u'❆'}`` -:param str temp_format: - format string, receives ``temp`` as an argument. Should also hold unit. -:param float temp_coldest: - coldest temperature. Any temperature below it will have gradient level equal - to zero. -:param float temp_hottest: - hottest temperature. Any temperature above it will have gradient level equal - to 100. Temperatures between ``temp_coldest`` and ``temp_hottest`` receive - gradient level that indicates relative position in this interval - (``100 * (cur-coldest) / (hottest-coldest)``). - -Divider highlight group used: ``background:divider``. - -Highlight groups used: ``weather_conditions`` or ``weather``, ``weather_temp_gradient`` (gradient) or ``weather``. -Also uses ``weather_conditions_{condition}`` for all weather conditions supported by Yahoo. 
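The gradient level computed above is just a clamped linear interpolation between temp_coldest and temp_hottest; a compact sketch of that mapping with the default -30/40 bounds:

def temp_gradient_level(temp, temp_coldest=-30.0, temp_hottest=40.0):
    # Clamp to [0, 100]; linear in between: 100 * (cur - coldest) / (hottest - coldest).
    if temp <= temp_coldest:
        return 0.0
    if temp >= temp_hottest:
        return 100.0
    return (temp - temp_coldest) * 100.0 / (temp_hottest - temp_coldest)


for temp in (-40, 5, 40):
    print(temp, temp_gradient_level(temp))
# -40 0.0
# 5 50.0
# 40 100.0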
-''') - - -def system_load(pl, format='{avg:.1f}', threshold_good=1, threshold_bad=2, track_cpu_count=False): - '''Return system load average. - - Highlights using ``system_load_good``, ``system_load_bad`` and - ``system_load_ugly`` highlighting groups, depending on the thresholds - passed to the function. - - :param str format: - format string, receives ``avg`` as an argument - :param float threshold_good: - threshold for gradient level 0: any normalized load average below this - value will have this gradient level. - :param float threshold_bad: - threshold for gradient level 100: any normalized load average above this - value will have this gradient level. Load averages between - ``threshold_good`` and ``threshold_bad`` receive gradient level that - indicates relative position in this interval: - (``100 * (cur-good) / (bad-good)``). - Note: both parameters are checked against normalized load averages. - :param bool track_cpu_count: - if True powerline will continuously poll the system to detect changes - in the number of CPUs. - - Divider highlight group used: ``background:divider``. - - Highlight groups used: ``system_load_gradient`` (gradient) or ``system_load``. - ''' - global cpu_count - try: - cpu_num = cpu_count = _cpu_count() if cpu_count is None or track_cpu_count else cpu_count - except NotImplementedError: - pl.warn('Unable to get CPU count: method is not implemented') - return None - ret = [] - for avg in os.getloadavg(): - normalized = avg / cpu_num - if normalized < threshold_good: - gradient_level = 0 - elif normalized < threshold_bad: - gradient_level = (normalized - threshold_good) * 100.0 / (threshold_bad - threshold_good) - else: - gradient_level = 100 - ret.append({ - 'contents': format.format(avg=avg), - 'highlight_group': ['system_load_gradient', 'system_load'], - 'divider_highlight_group': 'background:divider', - 'gradient_level': gradient_level, - }) - ret[0]['contents'] += ' ' - ret[1]['contents'] += ' ' - return ret - - -try: - import psutil - - def _get_bytes(interface): - io_counters = psutil.network_io_counters(pernic=True) - if_io = io_counters.get(interface) - if not if_io: - return None - return if_io.bytes_recv, if_io.bytes_sent - - def _get_interfaces(): - io_counters = psutil.network_io_counters(pernic=True) - for interface, data in io_counters.items(): - if data: - yield interface, data.bytes_recv, data.bytes_sent - - def _get_user(segment_info): - return psutil.Process(os.getpid()).username - - class CPULoadPercentSegment(ThreadedSegment): - interval = 1 - - def update(self, old_cpu): - return psutil.cpu_percent(interval=None) - - def run(self): - while not self.shutdown_event.is_set(): - try: - self.update_value = psutil.cpu_percent(interval=self.interval) - except Exception as e: - self.exception('Exception while calculating cpu_percent: {0}', str(e)) - - def render(self, cpu_percent, format='{0:.0f}%', **kwargs): - if not cpu_percent: - return None - return [{ - 'contents': format.format(cpu_percent), - 'gradient_level': cpu_percent, - 'highlight_group': ['cpu_load_percent_gradient', 'cpu_load_percent'], - }] -except ImportError: - def _get_bytes(interface): # NOQA - with open('/sys/class/net/{interface}/statistics/rx_bytes'.format(interface=interface), 'rb') as file_obj: - rx = int(file_obj.read()) - with open('/sys/class/net/{interface}/statistics/tx_bytes'.format(interface=interface), 'rb') as file_obj: - tx = int(file_obj.read()) - return (rx, tx) - - def _get_interfaces(): # NOQA - for interface in os.listdir('/sys/class/net'): - x = 
_get_bytes(interface) - if x is not None: - yield interface, x[0], x[1] - - def _get_user(segment_info): # NOQA - return segment_info['environ'].get('USER', None) - - class CPULoadPercentSegment(ThreadedSegment): # NOQA - interval = 1 - - @staticmethod - def startup(**kwargs): - pass - - @staticmethod - def start(): - pass - - @staticmethod - def shutdown(): - pass - - @staticmethod - def render(cpu_percent, pl, format='{0:.0f}%', **kwargs): - pl.warn('psutil package is not installed, thus CPU load is not available') - return None - - -cpu_load_percent = with_docstring(CPULoadPercentSegment(), -'''Return the average CPU load as a percentage. - -Requires the ``psutil`` module. - -:param str format: - Output format. Accepts measured CPU load as the first argument. - -Highlight groups used: ``cpu_load_percent_gradient`` (gradient) or ``cpu_load_percent``. -''') - - -username = False -# os.geteuid is not available on windows -_geteuid = getattr(os, 'geteuid', lambda: 1) - - -def user(pl, segment_info=None): - '''Return the current user. - - Highlights the user with the ``superuser`` if the effective user ID is 0. - - Highlight groups used: ``superuser`` or ``user``. It is recommended to define all highlight groups. - ''' - global username - if username is False: - username = _get_user(segment_info) - if username is None: - pl.warn('Failed to get username') - return None - euid = _geteuid() - return [{ - 'contents': username, - 'highlight_group': 'user' if euid != 0 else ['superuser', 'user'], - }] -if 'psutil' not in globals(): - user = requires_segment_info(user) - - -if os.path.exists('/proc/uptime'): - def _get_uptime(): - with open('/proc/uptime', 'r') as f: - return int(float(f.readline().split()[0])) -elif 'psutil' in globals(): - from time import time - - def _get_uptime(): # NOQA - # psutil.BOOT_TIME is not subject to clock adjustments, but time() is. - # Thus it is a fallback to /proc/uptime reading and not the reverse. - return int(time() - psutil.BOOT_TIME) -else: - def _get_uptime(): # NOQA - raise NotImplementedError - - -@add_divider_highlight_group('background:divider') -def uptime(pl, days_format='{days:d}d', hours_format=' {hours:d}h', minutes_format=' {minutes:d}m', seconds_format=' {seconds:d}s', shorten_len=3): - '''Return system uptime. - - :param str days_format: - day format string, will be passed ``days`` as the argument - :param str hours_format: - hour format string, will be passed ``hours`` as the argument - :param str minutes_format: - minute format string, will be passed ``minutes`` as the argument - :param str seconds_format: - second format string, will be passed ``seconds`` as the argument - :param int shorten_len: - shorten the amount of units (days, hours, etc.) displayed - - Divider highlight group used: ``background:divider``. - ''' - try: - seconds = _get_uptime() - except NotImplementedError: - pl.warn('Unable to get uptime. 
You should install psutil package') - return None - minutes, seconds = divmod(seconds, 60) - hours, minutes = divmod(minutes, 60) - days, hours = divmod(hours, 24) - time_formatted = list(filter(None, [ - days_format.format(days=days) if days and days_format else None, - hours_format.format(hours=hours) if hours and hours_format else None, - minutes_format.format(minutes=minutes) if minutes and minutes_format else None, - seconds_format.format(seconds=seconds) if seconds and seconds_format else None, - ]))[0:shorten_len] - return ''.join(time_formatted).strip() - - -class NetworkLoadSegment(KwThreadedSegment): - import re - interfaces = {} - replace_num_pat = re.compile(r'[a-zA-Z]+') - - @staticmethod - def key(interface='detect', **kwargs): - return interface - - def compute_state(self, interface): - if interface == 'detect': - proc_exists = getattr(self, 'proc_exists', None) - if proc_exists is None: - proc_exists = self.proc_exists = os.path.exists('/proc/net/route') - if proc_exists: - # Look for default interface in routing table - with open('/proc/net/route', 'rb') as f: - for line in f.readlines(): - parts = line.split() - if len(parts) > 1: - iface, destination = parts[:2] - if not destination.replace(b'0', b''): - interface = iface.decode('utf-8') - break - if interface == 'detect': - # Choose interface with most total activity, excluding some - # well known interface names - interface, total = 'eth0', -1 - for name, rx, tx in _get_interfaces(): - base = self.replace_num_pat.match(name) - if None in (base, rx, tx) or base.group() in ('lo', 'vmnet', 'sit'): - continue - activity = rx + tx - if activity > total: - total = activity - interface = name - - if interface in self.interfaces: - idata = self.interfaces[interface] - try: - idata['prev'] = idata['last'] - except KeyError: - pass - else: - idata = {} - if self.run_once: - idata['prev'] = (monotonic(), _get_bytes(interface)) - self.shutdown_event.wait(self.interval) - self.interfaces[interface] = idata - - idata['last'] = (monotonic(), _get_bytes(interface)) - return idata - - def render_one(self, idata, recv_format='⬇ {value:>8}', sent_format='⬆ {value:>8}', suffix='B/s', si_prefix=False, **kwargs): - if not idata or 'prev' not in idata: - return None - - t1, b1 = idata['prev'] - t2, b2 = idata['last'] - measure_interval = t2 - t1 - - if None in (b1, b2): - return None - if measure_interval == 0: - self.error('Measure interval is zero. This should not happen') - return None - - r = [] - for i, key in zip((0, 1), ('recv', 'sent')): - format = locals()[key + '_format'] - value = (b2[i] - b1[i]) / measure_interval - max_key = key + '_max' - is_gradient = max_key in kwargs - hl_groups = ['network_load_' + key, 'network_load'] - if is_gradient: - hl_groups[:0] = (group + '_gradient' for group in hl_groups) - r.append({ - 'contents': format.format(value=humanize_bytes(value, suffix, si_prefix)), - 'divider_highlight_group': 'background:divider', - 'highlight_group': hl_groups, - }) - if is_gradient: - max = kwargs[max_key] - if value >= max: - r[-1]['gradient_level'] = 100 - else: - r[-1]['gradient_level'] = value * 100.0 / max - - return r - - -network_load = with_docstring(NetworkLoadSegment(), -'''Return the network load. - -Uses the ``psutil`` module if available for multi-platform compatibility, -falls back to reading -:file:`/sys/class/net/{interface}/statistics/{rx,tx}_bytes`. 
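Everything above reduces to sampling an interface's cumulative rx/tx byte counters twice and dividing the deltas by the elapsed time; humanize_bytes (not included in this diff) only prettifies the numbers. A rough Linux-only sketch along the lines of the /sys/class/net fallback path, with 'eth0' used purely as an example interface name:

import time


def read_bytes(interface):
    # The same files the psutil-less fallback reads: cumulative byte counters.
    with open('/sys/class/net/{0}/statistics/rx_bytes'.format(interface)) as f:
        rx = int(f.read())
    with open('/sys/class/net/{0}/statistics/tx_bytes'.format(interface)) as f:
        tx = int(f.read())
    return rx, tx


def network_rates(interface, interval=1.0):
    t1, (rx1, tx1) = time.monotonic(), read_bytes(interface)
    time.sleep(interval)
    t2, (rx2, tx2) = time.monotonic(), read_bytes(interface)
    elapsed = t2 - t1
    # Bytes per second, mirroring (b2[i] - b1[i]) / measure_interval above.
    return (rx2 - rx1) / elapsed, (tx2 - tx1) / elapsed


if __name__ == '__main__':
    recv, sent = network_rates('eth0')
    print('⬇ {0:.0f} B/s  ⬆ {1:.0f} B/s'.format(recv, sent))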
- -:param str interface: - network interface to measure (use the special value "detect" to have powerline try to auto-detect the network interface) -:param str suffix: - string appended to each load string -:param bool si_prefix: - use SI prefix, e.g. MB instead of MiB -:param str recv_format: - format string, receives ``value`` as argument -:param str sent_format: - format string, receives ``value`` as argument -:param float recv_max: - maximum number of received bytes per second. Is only used to compute - gradient level -:param float sent_max: - maximum number of sent bytes per second. Is only used to compute gradient - level - -Divider highlight group used: ``background:divider``. - -Highlight groups used: ``network_load_sent_gradient`` (gradient) or ``network_load_recv_gradient`` (gradient) or ``network_load_gradient`` (gradient), ``network_load_sent`` or ``network_load_recv`` or ``network_load``. -''') - - -@requires_segment_info -def virtualenv(pl, segment_info): - '''Return the name of the current Python virtualenv.''' - return os.path.basename(segment_info['environ'].get('VIRTUAL_ENV', '')) or None - - -_IMAPKey = namedtuple('Key', 'username password server port folder') - - -class EmailIMAPSegment(KwThreadedSegment): - interval = 60 - - @staticmethod - def key(username, password, server='imap.gmail.com', port=993, folder='INBOX', **kwargs): - return _IMAPKey(username, password, server, port, folder) - - def compute_state(self, key): - if not key.username or not key.password: - self.warn('Username and password are not configured') - return None - try: - import imaplib - import re - mail = imaplib.IMAP4_SSL(key.server, key.port) - mail.login(key.username, key.password) - rc, message = mail.status(key.folder, '(UNSEEN)') - unread_str = message[0].decode('utf-8') - unread_count = int(re.search('UNSEEN (\d+)', unread_str).group(1)) - except imaplib.IMAP4.error as e: - unread_count = str(e) - return unread_count - - @staticmethod - def render_one(unread_count, max_msgs=None, **kwargs): - if not unread_count: - return None - elif type(unread_count) != int or not max_msgs: - return [{ - 'contents': str(unread_count), - 'highlight_group': 'email_alert', - }] - else: - return [{ - 'contents': str(unread_count), - 'highlight_group': ['email_alert_gradient', 'email_alert'], - 'gradient_level': min(unread_count * 100.0 / max_msgs, 100), - }] - - -email_imap_alert = with_docstring(EmailIMAPSegment(), -'''Return unread e-mail count for IMAP servers. - -:param str username: - login username -:param str password: - login password -:param str server: - e-mail server -:param int port: - e-mail server port -:param str folder: - folder to check for e-mails -:param int max_msgs: - Maximum number of messages. If there are more messages then max_msgs then it - will use gradient level equal to 100, otherwise gradient level is equal to - ``100 * msgs_num / max_msgs``. If not present gradient is not computed. - -Highlight groups used: ``email_alert_gradient`` (gradient), ``email_alert``. 
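The unread count above comes from a single IMAP STATUS command: the server answers with something like '"INBOX" (UNSEEN 3)' and the number is pulled out with a regular expression. A trimmed-down sketch of that exchange (the credentials are placeholders):

import imaplib
import re


def unseen_count(username, password, server='imap.gmail.com', port=993, folder='INBOX'):
    mail = imaplib.IMAP4_SSL(server, port)
    mail.login(username, password)
    # STATUS returns e.g. (b'OK', [b'"INBOX" (UNSEEN 3)']); extract the number.
    rc, message = mail.status(folder, '(UNSEEN)')
    mail.logout()
    return int(re.search(r'UNSEEN (\d+)', message[0].decode('utf-8')).group(1))


# unseen_count('user@example.com', 'app-password')  # -> e.g. 3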
-''') - - -class NowPlayingSegment(object): - STATE_SYMBOLS = { - 'fallback': '♫', - 'play': '▶', - 'pause': '▮▮', - 'stop': '■', - } - - def __call__(self, player='mpd', format='{state_symbol} {artist} - {title} ({total})', **kwargs): - player_func = getattr(self, 'player_{0}'.format(player)) - stats = { - 'state': None, - 'state_symbol': self.STATE_SYMBOLS['fallback'], - 'album': None, - 'artist': None, - 'title': None, - 'elapsed': None, - 'total': None, - } - func_stats = player_func(**kwargs) - if not func_stats: - return None - stats.update(func_stats) - return format.format(**stats) - - @staticmethod - def _run_cmd(cmd): - from subprocess import Popen, PIPE - try: - p = Popen(cmd, stdout=PIPE) - stdout, err = p.communicate() - except OSError as e: - sys.stderr.write('Could not execute command ({0}): {1}\n'.format(e, cmd)) - return None - return stdout.strip() - - @staticmethod - def _convert_state(state): - state = state.lower() - if 'play' in state: - return 'play' - if 'pause' in state: - return 'pause' - if 'stop' in state: - return 'stop' - - @staticmethod - def _convert_seconds(seconds): - return '{0:.0f}:{1:02.0f}'.format(*divmod(float(seconds), 60)) - - def player_cmus(self, pl): - '''Return cmus player information. - - cmus-remote -Q returns data with multi-level information i.e. - status playing - file - tag artist - tag title - tag .. - tag n - set continue - set repeat - set .. - set n - - For the information we are looking for we don't really care if we're on - the tag level or the set level. The dictionary comprehension in this - method takes anything in ignore_levels and brings the key inside that - to the first level of the dictionary. - ''' - now_playing_str = self._run_cmd(['cmus-remote', '-Q']) - if not now_playing_str: - return - ignore_levels = ('tag', 'set',) - now_playing = dict(((token[0] if token[0] not in ignore_levels else token[1], - (' '.join(token[1:]) if token[0] not in ignore_levels else - ' '.join(token[2:]))) for token in [line.split(' ') for line in now_playing_str.split('\n')[:-1]])) - state = self._convert_state(now_playing.get('status')) - return { - 'state': state, - 'state_symbol': self.STATE_SYMBOLS.get(state), - 'album': now_playing.get('album'), - 'artist': now_playing.get('artist'), - 'title': now_playing.get('title'), - 'elapsed': self._convert_seconds(now_playing.get('position', 0)), - 'total': self._convert_seconds(now_playing.get('duration', 0)), - } - - def player_mpd(self, pl, host='localhost', port=6600): - try: - import mpd - client = mpd.MPDClient() - client.connect(host, port) - now_playing = client.currentsong() - if not now_playing: - return - status = client.status() - client.close() - client.disconnect() - return { - 'state': status.get('state'), - 'state_symbol': self.STATE_SYMBOLS.get(status.get('state')), - 'album': now_playing.get('album'), - 'artist': now_playing.get('artist'), - 'title': now_playing.get('title'), - 'elapsed': self._convert_seconds(now_playing.get('elapsed', 0)), - 'total': self._convert_seconds(now_playing.get('time', 0)), - } - except ImportError: - now_playing = self._run_cmd(['mpc', 'current', '-f', '%album%\n%artist%\n%title%\n%time%', '-h', str(host), '-p', str(port)]) - if not now_playing: - return - now_playing = now_playing.split('\n') - return { - 'album': now_playing[0], - 'artist': now_playing[1], - 'title': now_playing[2], - 'total': now_playing[3], - } - - def player_spotify(self, pl): - try: - import dbus - except ImportError: - sys.stderr.write('Could not add Spotify segment: Requires 
python-dbus.\n') - return - bus = dbus.SessionBus() - DBUS_IFACE_PROPERTIES = 'org.freedesktop.DBus.Properties' - DBUS_IFACE_PLAYER = 'org.freedesktop.MediaPlayer2' - try: - player = bus.get_object('com.spotify.qt', '/') - iface = dbus.Interface(player, DBUS_IFACE_PROPERTIES) - info = iface.Get(DBUS_IFACE_PLAYER, 'Metadata') - status = iface.Get(DBUS_IFACE_PLAYER, 'PlaybackStatus') - except dbus.exceptions.DBusException: - return - if not info: - return - state = self._convert_state(status) - return { - 'state': state, - 'state_symbol': self.STATE_SYMBOLS.get(state), - 'album': info.get('xesam:album'), - 'artist': info.get('xesam:artist')[0], - 'title': info.get('xesam:title'), - 'total': self._convert_seconds(info.get('mpris:length') / 1e6), - } - - def player_rhythmbox(self, pl): - now_playing = self._run_cmd(['rhythmbox-client', '--no-start', '--no-present', '--print-playing-format', '%at\n%aa\n%tt\n%te\n%td']) - if not now_playing: - return - now_playing = now_playing.split('\n') - return { - 'album': now_playing[0], - 'artist': now_playing[1], - 'title': now_playing[2], - 'elapsed': now_playing[3], - 'total': now_playing[4], - } -now_playing = NowPlayingSegment() diff --git a/common/.local/lib/python2.7/site-packages/powerline/segments/ipython.py b/common/.local/lib/python2.7/site-packages/powerline/segments/ipython.py deleted file mode 100644 index 9a29ea8..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/segments/ipython.py +++ /dev/null @@ -1,8 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from powerline.theme import requires_segment_info - - -@requires_segment_info -def prompt_count(pl, segment_info): - return str(segment_info['ipython'].prompt_count) diff --git a/common/.local/lib/python2.7/site-packages/powerline/segments/shell.py b/common/.local/lib/python2.7/site-packages/powerline/segments/shell.py deleted file mode 100644 index e870048..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/segments/shell.py +++ /dev/null @@ -1,28 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from powerline.theme import requires_segment_info - - -@requires_segment_info -def last_status(pl, segment_info): - '''Return last exit code. - - Highlight groups used: ``exit_fail`` - ''' - if not segment_info['args'].last_exit_code: - return None - return [{'contents': str(segment_info['args'].last_exit_code), 'highlight_group': 'exit_fail'}] - - -@requires_segment_info -def last_pipe_status(pl, segment_info): - '''Return last pipe status. 
- - Highlight groups used: ``exit_fail``, ``exit_success`` - ''' - last_pipe_status = segment_info['args'].last_pipe_status - if any(last_pipe_status): - return [{'contents': str(status), 'highlight_group': 'exit_fail' if status else 'exit_success', 'draw_inner_divider': True} - for status in last_pipe_status] - else: - return None diff --git a/common/.local/lib/python2.7/site-packages/powerline/segments/vim.py b/common/.local/lib/python2.7/site-packages/powerline/segments/vim.py deleted file mode 100644 index 21dff03..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/segments/vim.py +++ /dev/null @@ -1,443 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from __future__ import absolute_import, division - -import os -try: - import vim -except ImportError: - vim = {} # NOQA - -from powerline.bindings.vim import vim_get_func, getbufvar -from powerline.theme import requires_segment_info -from powerline.lib import add_divider_highlight_group -from powerline.lib.vcs import guess -from powerline.lib.humanize_bytes import humanize_bytes -from powerline.lib.threaded import KwThreadedSegment, with_docstring -from powerline.lib import wraps_saveargs as wraps -from collections import defaultdict - -vim_funcs = { - 'virtcol': vim_get_func('virtcol', rettype=int), - 'fnamemodify': vim_get_func('fnamemodify', rettype=str), - 'expand': vim_get_func('expand', rettype=str), - 'bufnr': vim_get_func('bufnr', rettype=int), - 'line2byte': vim_get_func('line2byte', rettype=int), -} - -vim_modes = { - 'n': 'NORMAL', - 'no': 'N·OPER', - 'v': 'VISUAL', - 'V': 'V·LINE', - '^V': 'V·BLCK', - 's': 'SELECT', - 'S': 'S·LINE', - '^S': 'S·BLCK', - 'i': 'INSERT', - 'R': 'REPLACE', - 'Rv': 'V·RPLCE', - 'c': 'COMMND', - 'cv': 'VIM EX', - 'ce': 'EX', - 'r': 'PROMPT', - 'rm': 'MORE', - 'r?': 'CONFIRM', - '!': 'SHELL', -} - - -eventfuncs = defaultdict(lambda: []) -bufeventfuncs = defaultdict(lambda: []) -defined_events = set() - - -def purgeonevents_reg(func, events, is_buffer_event=False): - if is_buffer_event: - cureventfuncs = bufeventfuncs - else: - cureventfuncs = eventfuncs - for event in events: - if event not in defined_events: - vim.eval('PowerlineRegisterCachePurgerEvent("' + event + '")') - defined_events.add(event) - cureventfuncs[event].append(func) - - -def launchevent(event): - global eventfuncs - global bufeventfuncs - for func in eventfuncs[event]: - func() - if bufeventfuncs[event]: - buffer = vim.buffers[int(vim_funcs['expand']('')) - 1] - for func in bufeventfuncs[event]: - func(buffer) - - -# TODO Remove cache when needed -def window_cached(func): - cache = {} - - @requires_segment_info - @wraps(func) - def ret(segment_info, **kwargs): - window_id = segment_info['window_id'] - if segment_info['mode'] == 'nc': - return cache.get(window_id) - else: - r = func(**kwargs) - cache[window_id] = r - return r - - return ret - - -@requires_segment_info -def mode(pl, segment_info, override=None): - '''Return the current vim mode. - - :param dict override: - dict for overriding default mode strings, e.g. ``{ 'n': 'NORM' }`` - ''' - mode = segment_info['mode'] - if mode == 'nc': - return None - if not override: - return vim_modes[mode] - try: - return override[mode] - except KeyError: - return vim_modes[mode] - - -@requires_segment_info -def modified_indicator(pl, segment_info, text='+'): - '''Return a file modified indicator. 
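The last_pipe_status segment above maps the shell's pipe-status list to one segment per command, coloured exit_success for a zero status and exit_fail otherwise, and collapses to nothing when every command in the pipeline succeeded. A short sketch of that mapping on a made-up [0, 1, 0] pipeline:

def pipe_status_segments(last_pipe_status):
    # Nothing to show when every command in the pipeline exited with 0.
    if not any(last_pipe_status):
        return None
    return [{
        'contents': str(status),
        'highlight_group': 'exit_fail' if status else 'exit_success',
        'draw_inner_divider': True,
    } for status in last_pipe_status]


print(pipe_status_segments([0, 0]))  # None
for segment in pipe_status_segments([0, 1, 0]):
    print(segment['contents'], segment['highlight_group'])
# 0 exit_success
# 1 exit_fail
# 0 exit_success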
- - :param string text: - text to display if the current buffer is modified - ''' - return text if int(getbufvar(segment_info['bufnr'], '&modified')) else None - - -@requires_segment_info -def paste_indicator(pl, segment_info, text='PASTE'): - '''Return a paste mode indicator. - - :param string text: - text to display if paste mode is enabled - ''' - return text if int(vim.eval('&paste')) else None - - -@requires_segment_info -def readonly_indicator(pl, segment_info, text=''): - '''Return a read-only indicator. - - :param string text: - text to display if the current buffer is read-only - ''' - return text if int(getbufvar(segment_info['bufnr'], '&readonly')) else None - - -@requires_segment_info -def file_directory(pl, segment_info, shorten_user=True, shorten_cwd=True, shorten_home=False): - '''Return file directory (head component of the file path). - - :param bool shorten_user: - shorten ``$HOME`` directory to :file:`~/` - - :param bool shorten_cwd: - shorten current directory to :file:`./` - - :param bool shorten_home: - shorten all directories in :file:`/home/` to :file:`~user/` instead of :file:`/home/user/`. - ''' - name = segment_info['buffer'].name - if not name: - return None - file_directory = vim_funcs['fnamemodify'](name, (':~' if shorten_user else '') - + (':.' if shorten_cwd else '') + ':h') - if shorten_home and file_directory.startswith('/home/'): - file_directory = '~' + file_directory[6:] - return file_directory + os.sep if file_directory else None - - -@requires_segment_info -def file_name(pl, segment_info, display_no_file=False, no_file_text='[No file]'): - '''Return file name (tail component of the file path). - - :param bool display_no_file: - display a string if the buffer is missing a file name - :param str no_file_text: - the string to display if the buffer is missing a file name - - Highlight groups used: ``file_name_no_file`` or ``file_name``, ``file_name``. - ''' - name = segment_info['buffer'].name - if not name: - if display_no_file: - return [{ - 'contents': no_file_text, - 'highlight_group': ['file_name_no_file', 'file_name'], - }] - else: - return None - file_name = vim_funcs['fnamemodify'](name, ':~:.:t').decode('utf-8') - return file_name - - -@window_cached -def file_size(pl, suffix='B', si_prefix=False): - '''Return file size in &encoding. - - :param str suffix: - string appended to the file size - :param bool si_prefix: - use SI prefix, e.g. MB instead of MiB - :return: file size or None if the file isn't saved or if the size is too big to fit in a number - ''' - # Note: returns file size in &encoding, not in &fileencoding. But returned - # size is updated immediately; and it is valid for any buffer - file_size = vim_funcs['line2byte'](len(vim.current.buffer) + 1) - 1 - return humanize_bytes(file_size, suffix, si_prefix) - - -@requires_segment_info -@add_divider_highlight_group('background:divider') -def file_format(pl, segment_info): - '''Return file format (i.e. line ending type). - - :return: file format or None if unknown or missing file format - - Divider highlight group used: ``background:divider``. - ''' - return getbufvar(segment_info['bufnr'], '&fileformat') or None - - -@requires_segment_info -@add_divider_highlight_group('background:divider') -def file_encoding(pl, segment_info): - '''Return file encoding/character set. - - :return: file encoding/character set or None if unknown or missing file encoding - - Divider highlight group used: ``background:divider``. 
- ''' - return getbufvar(segment_info['bufnr'], '&fileencoding') or None - - -@requires_segment_info -@add_divider_highlight_group('background:divider') -def file_type(pl, segment_info): - '''Return file type. - - :return: file type or None if unknown file type - - Divider highlight group used: ``background:divider``. - ''' - return getbufvar(segment_info['bufnr'], '&filetype') or None - - -@requires_segment_info -def line_percent(pl, segment_info, gradient=False): - '''Return the cursor position in the file as a percentage. - - :param bool gradient: - highlight the percentage with a color gradient (by default a green to red gradient) - - Highlight groups used: ``line_percent_gradient`` (gradient), ``line_percent``. - ''' - line_current = segment_info['window'].cursor[0] - line_last = len(segment_info['buffer']) - percentage = line_current * 100.0 / line_last - if not gradient: - return str(int(round(percentage))) - return [{ - 'contents': str(int(round(percentage))), - 'highlight_group': ['line_percent_gradient', 'line_percent'], - 'gradient_level': percentage, - }] - - -@requires_segment_info -def line_current(pl, segment_info): - '''Return the current cursor line.''' - return str(segment_info['window'].cursor[0]) - - -@requires_segment_info -def col_current(pl, segment_info): - '''Return the current cursor column. - ''' - return str(segment_info['window'].cursor[1] + 1) - - -# TODO Add &textwidth-based gradient -@window_cached -def virtcol_current(pl, gradient=True): - '''Return current visual column with concealed characters ingored - - :param bool gradient: - Determines whether it should show textwidth-based gradient (gradient level is ``virtcol * 100 / textwidth``). - - Highlight groups used: ``virtcol_current_gradient`` (gradient), ``virtcol_current`` or ``col_current``. - ''' - col = vim_funcs['virtcol']('.') - r = [{'contents': str(col), 'highlight_group': ['virtcol_current', 'col_current']}] - if gradient: - textwidth = int(getbufvar('%', '&textwidth')) - r[-1]['gradient_level'] = min(col * 100 / textwidth, 100) if textwidth else 0 - r[-1]['highlight_group'].insert(0, 'virtcol_current_gradient') - return r - - -def modified_buffers(pl, text='+ ', join_str=','): - '''Return a comma-separated list of modified buffers. - - :param str text: - text to display before the modified buffer list - :param str join_str: - string to use for joining the modified buffer list - ''' - buffer_len = vim_funcs['bufnr']('$') - buffer_mod = [str(bufnr) for bufnr in range(1, buffer_len + 1) if int(getbufvar(bufnr, '&modified') or 0)] - if buffer_mod: - return text + join_str.join(buffer_mod) - return None - - -class KwWindowThreadedSegment(KwThreadedSegment): - def set_state(self, **kwargs): - kwargs = kwargs.copy() - for window in vim.windows: - buffer = window.buffer - kwargs['segment_info'] = {'bufnr': buffer.number, 'buffer': buffer} - super(KwWindowThreadedSegment, self).set_state(**kwargs) - - -class RepositorySegment(KwWindowThreadedSegment): - def __init__(self): - super(RepositorySegment, self).__init__() - self.directories = {} - - @staticmethod - def key(segment_info, **kwargs): - # FIXME os.getcwd() is not a proper variant for non-current buffers - return segment_info['buffer'].name or os.getcwd() - - def update(self, *args): - # .compute_state() is running only in this method, and only in one - # thread, thus operations with .directories do not need write locks - # (.render() method is not using .directories). 
If this is changed - # .directories needs redesigning - self.directories.clear() - return super(RepositorySegment, self).update(*args) - - def compute_state(self, path): - repo = guess(path=path) - if repo: - if repo.directory in self.directories: - return self.directories[repo.directory] - else: - r = self.process_repo(repo) - self.directories[repo.directory] = r - return r - - -@requires_segment_info -class RepositoryStatusSegment(RepositorySegment): - interval = 2 - - @staticmethod - def process_repo(repo): - return repo.status() - - -repository_status = with_docstring(RepositoryStatusSegment(), -'''Return the status for the current repo.''') - - -@requires_segment_info -class BranchSegment(RepositorySegment): - interval = 0.2 - started_repository_status = False - - @staticmethod - def process_repo(repo): - return repo.branch() - - def render_one(self, branch, segment_info, status_colors=False, **kwargs): - if not branch: - return None - - if status_colors: - self.started_repository_status = True - - return [{ - 'contents': branch, - 'highlight_group': (['branch_dirty' if repository_status(segment_info=segment_info, **kwargs) else 'branch_clean'] - if status_colors else []) + ['branch'], - 'divider_highlight_group': 'branch:divider', - }] - - def startup(self, status_colors=False, **kwargs): - super(BranchSegment, self).startup(**kwargs) - if status_colors: - self.started_repository_status = True - repository_status.startup(**kwargs) - - def shutdown(self): - if self.started_repository_status: - repository_status.shutdown() - super(BranchSegment, self).shutdown() - - -branch = with_docstring(BranchSegment(), -'''Return the current working branch. - -:param bool status_colors: - determines whether repository status will be used to determine highlighting. Default: False. - -Highlight groups used: ``branch_clean``, ``branch_dirty``, ``branch``. - -Divider highlight group used: ``branch:divider``. -''') - - -@requires_segment_info -class FileVCSStatusSegment(KwWindowThreadedSegment): - interval = 0.2 - - @staticmethod - def key(segment_info, **kwargs): - name = segment_info['buffer'].name - skip = not (name and (not getbufvar(segment_info['bufnr'], '&buftype'))) - return name, skip - - @staticmethod - def compute_state(key): - name, skip = key - if not skip: - repo = guess(path=name) - if repo: - status = repo.status(os.path.relpath(name, repo.directory)) - if not status: - return None - status = status.strip() - ret = [] - for status in status: - ret.append({ - 'contents': status, - 'highlight_group': ['file_vcs_status_' + status, 'file_vcs_status'], - }) - return ret - return None - - -file_vcs_status = with_docstring(FileVCSStatusSegment(), -'''Return the VCS status for this buffer. - -Highlight groups used: ``file_vcs_status``. 
-''') diff --git a/common/.local/lib/python2.7/site-packages/powerline/shell.py b/common/.local/lib/python2.7/site-packages/powerline/shell.py deleted file mode 100644 index 4e8aa78..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/shell.py +++ /dev/null @@ -1,55 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from powerline import Powerline -from powerline.lib import mergedicts, parsedotval - - -def mergeargs(argvalue): - if not argvalue: - return None - r = dict([argvalue[0]]) - for subval in argvalue[1:]: - mergedicts(r, dict([subval])) - return r - - -class ShellPowerline(Powerline): - def __init__(self, args, **kwargs): - self.args = args - self.theme_option = mergeargs(args.theme_option) or {} - super(ShellPowerline, self).__init__(args.ext[0], args.renderer_module, **kwargs) - - def load_main_config(self): - r = super(ShellPowerline, self).load_main_config() - if self.args.config: - mergedicts(r, mergeargs(self.args.config)) - return r - - def load_theme_config(self, name): - r = super(ShellPowerline, self).load_theme_config(name) - if name in self.theme_option: - mergedicts(r, self.theme_option[name]) - return r - - def get_config_paths(self): - if self.args.config_path: - return [self.args.config_path] - else: - return super(ShellPowerline, self).get_config_paths() - - -def get_argparser(parser=None, *args, **kwargs): - if not parser: - import argparse - parser = argparse.ArgumentParser - p = parser(*args, **kwargs) - p.add_argument('ext', nargs=1) - p.add_argument('side', nargs='?', choices=('left', 'right')) - p.add_argument('-r', '--renderer_module', metavar='MODULE', type=str) - p.add_argument('-w', '--width', type=int) - p.add_argument('--last_exit_code', metavar='INT', type=int) - p.add_argument('--last_pipe_status', metavar='LIST', default='', type=lambda s: [int(status) for status in s.split()]) - p.add_argument('-c', '--config', metavar='KEY.KEY=VALUE', type=parsedotval, action='append') - p.add_argument('-t', '--theme_option', metavar='THEME.KEY.KEY=VALUE', type=parsedotval, action='append') - p.add_argument('-p', '--config_path', metavar='PATH') - return p diff --git a/common/.local/lib/python2.7/site-packages/powerline/theme.py b/common/.local/lib/python2.7/site-packages/powerline/theme.py deleted file mode 100644 index 56a2a1b..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/theme.py +++ /dev/null @@ -1,148 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from .segment import gen_segment_getter - - -try: - from __builtin__ import unicode -except ImportError: - unicode = str # NOQA - - -def u(s): - if type(s) is unicode: - return s - else: - return unicode(s, 'utf-8') - - -def requires_segment_info(func): - func.powerline_requires_segment_info = True - return func - - -class Theme(object): - def __init__(self, - ext, - theme_config, - common_config, - pl, - top_theme_config=None, - run_once=False, - shutdown_event=None): - self.dividers = theme_config.get('dividers', common_config['dividers']) - self.spaces = theme_config.get('spaces', common_config['spaces']) - self.segments = { - 'left': [], - 'right': [], - } - self.EMPTY_SEGMENT = { - 'contents': None, - 'highlight': {'fg': False, 'bg': False, 'attr': 0} - } - self.pl = pl - theme_configs = [theme_config] - if top_theme_config: - theme_configs.append(top_theme_config) - get_segment = gen_segment_getter(pl, ext, common_config['paths'], theme_configs, theme_config.get('default_module')) - for side in ['left', 'right']: - for segment in theme_config['segments'].get(side, []): - segment = 
get_segment(segment, side) - if not run_once: - if segment['startup']: - try: - segment['startup'](pl=pl, shutdown_event=shutdown_event, **segment['args']) - except Exception as e: - pl.error('Exception during {0} startup: {1}', segment['name'], str(e)) - continue - self.segments[side].append(segment) - - def shutdown(self): - for segments in self.segments.values(): - for segment in segments: - try: - segment['shutdown']() - except TypeError: - pass - - def get_divider(self, side='left', type='soft'): - '''Return segment divider.''' - return self.dividers[side][type] - - def get_spaces(self): - return self.spaces - - def get_segments(self, side=None, segment_info=None): - '''Return all segments. - - Function segments are called, and all segments get their before/after - and ljust/rjust properties applied. - ''' - for side in [side] if side else ['left', 'right']: - parsed_segments = [] - for segment in self.segments[side]: - if segment['type'] == 'function': - self.pl.prefix = segment['name'] - try: - if (hasattr(segment['contents_func'], 'powerline_requires_segment_info') - and segment['contents_func'].powerline_requires_segment_info): - contents = segment['contents_func'](pl=self.pl, segment_info=segment_info, **segment['args']) - else: - contents = segment['contents_func'](pl=self.pl, **segment['args']) - except Exception as e: - self.pl.exception('Exception while computing segment: {0}', str(e)) - continue - - if contents is None: - continue - if isinstance(contents, list): - segment_base = segment.copy() - if contents: - draw_divider_position = -1 if side == 'left' else 0 - for key, i, newval in ( - ('before', 0, ''), - ('after', -1, ''), - ('draw_soft_divider', draw_divider_position, True), - ('draw_hard_divider', draw_divider_position, True), - ): - try: - contents[i][key] = segment_base.pop(key) - segment_base[key] = newval - except KeyError: - pass - - draw_inner_divider = None - if side == 'right': - append = parsed_segments.append - else: - pslen = len(parsed_segments) - append = lambda item: parsed_segments.insert(pslen, item) - - for subsegment in (contents if side == 'right' else reversed(contents)): - segment_copy = segment_base.copy() - segment_copy.update(subsegment) - if draw_inner_divider is not None: - segment_copy['draw_soft_divider'] = draw_inner_divider - draw_inner_divider = segment_copy.pop('draw_inner_divider', None) - append(segment_copy) - else: - segment['contents'] = contents - parsed_segments.append(segment) - elif segment['width'] == 'auto' or (segment['type'] == 'string' and segment['contents'] is not None): - parsed_segments.append(segment) - else: - continue - for segment in parsed_segments: - segment['contents'] = segment['before'] + u(segment['contents'] if segment['contents'] is not None else '') + segment['after'] - # Align segment contents - if segment['width'] and segment['width'] != 'auto': - if segment['align'] == 'l': - segment['contents'] = segment['contents'].ljust(segment['width']) - elif segment['align'] == 'r': - segment['contents'] = segment['contents'].rjust(segment['width']) - elif segment['align'] == 'c': - segment['contents'] = segment['contents'].center(segment['width']) - # We need to yield a copy of the segment, or else mode-dependent - # segment contents can't be cached correctly e.g. 
when caching - # non-current window contents for vim statuslines - yield segment.copy() diff --git a/common/.local/lib/python2.7/site-packages/powerline/vim.py b/common/.local/lib/python2.7/site-packages/powerline/vim.py deleted file mode 100644 index b49bd45..0000000 --- a/common/.local/lib/python2.7/site-packages/powerline/vim.py +++ /dev/null @@ -1,95 +0,0 @@ -# vim:fileencoding=utf-8:noet - -from __future__ import absolute_import - -from powerline.bindings.vim import vim_get_func -from powerline import Powerline -from powerline.lib import mergedicts -from powerline.matcher import gen_matcher_getter -import vim - - -vim_exists = vim_get_func('exists', rettype=int) - - -def _override_from(config, override_varname): - if vim_exists(override_varname): - # FIXME vim.eval has problem with numeric types, vim.bindeval may be - # absent (and requires converting values to python built-in types), - # vim.eval with typed call like the one I implemented in frawor is slow. - # Maybe eval(vime.eval('string({0})'.format(override_varname)))? - overrides = vim.eval(override_varname) - mergedicts(config, overrides) - return config - - -class VimPowerline(Powerline): - def __init__(self): - super(VimPowerline, self).__init__('vim') - - def add_local_theme(self, key, config): - '''Add local themes at runtime (during vim session). - - :param str key: - Matcher name (in format ``{matcher_module}.{module_attribute}`` or - ``{module_attribute}`` if ``{matcher_module}`` is - ``powerline.matchers.vim``). Function pointed by - ``{module_attribute}`` should be hashable and accept a dictionary - with information about current buffer and return boolean value - indicating whether current window matched conditions. See also - :ref:`local_themes key description `. - - :param dict config: - :ref:`Theme ` dictionary. - - :return: - ``True`` if theme was added successfully and ``False`` if theme with - the same matcher already exists. - ''' - self.update_renderer() - key = self.get_matcher(key) - try: - self.renderer.add_local_theme(key, {'config': config}) - except KeyError: - return False - else: - return True - - def load_main_config(self): - return _override_from(super(VimPowerline, self).load_main_config(), 'g:powerline_config_overrides') - - def load_theme_config(self, name): - # Note: themes with non-[a-zA-Z0-9_] names are impossible to override - # (though as far as I know exists() won’t throw). Won’t fix, use proper - # theme names. - return _override_from(super(VimPowerline, self).load_theme_config(name), - 'g:powerline_theme_overrides__' + name) - - def get_local_themes(self, local_themes): - if not local_themes: - return {} - - self.get_matcher = gen_matcher_getter(self.ext, self.import_paths) - return dict(((self.get_matcher(key), {'config': self.load_theme_config(val)}) - for key, val in local_themes.items())) - - def get_config_paths(self): - if vim_exists('g:powerline_config_path'): - return [vim.eval('g:powerline_config_path')] - else: - return super(VimPowerline, self).get_config_paths() - - @staticmethod - def get_segment_info(): - return {} - - def reset_highlight(self): - try: - self.renderer.reset_highlight() - except AttributeError: - # Renderer object appears only after first `.render()` call. Thus if - # ColorScheme event happens before statusline is drawn for the first - # time AttributeError will be thrown for the self.renderer. It is - # fine to ignore it: no renderer == no colors to reset == no need to - # do anything. 
- pass diff --git a/update-home.sh b/update-home.sh index ff6a4d3..a4b9265 100755 --- a/update-home.sh +++ b/update-home.sh @@ -14,6 +14,10 @@ git clone git://github.com/robbyrussell/oh-my-zsh.git ~/.oh-my-zsh touch ~/.zshrc-local + +pip install --upgrade --user jedi +pip install --upgrade --user git+git://github.com/Lokaltog/powerline + # Copy the graphical part if needed if [ "$1" == '--graphic' ] then @@ -64,9 +68,9 @@ git clone https://github.com/majutsushi/tagbar ~/.vim/bundle/tagbar rm -rf ~/.vim/bundle/vcscommand git clone https://github.com/vim-scripts/vcscommand.vim.git ~/.vim/bundle/vcscommand -# Install easytag plugin +# Remove easytag plugin rm -rf ~/.vim/bundle/easytag -git clone https://github.com/vim-scripts/easytags.vim.git ~/.vim/bundle/easytag +#git clone https://github.com/vim-scripts/easytags.vim.git ~/.vim/bundle/easytag # Install closetag plugin rm -rf ~/.vim/bundle/closetag @@ -79,3 +83,16 @@ git clone https://github.com/vim-scripts/css3-mod.git ~/.vim/bundle/css3-mod # Install less syntax rm -rf ~/.vim/bundle/vim-less git clone https://github.com/groenewege/vim-less.git ~/.vim/bundle/vim-less + +# Install grep plugin +rm -rf ~/.vim/bundle/grep +git clone https://github.com/vim-scripts/grep.vim.git ~/.vim/bundle/grep + +# Install vimwiki +rm -rf ~/.vim/bundle/vimwiki +hg clone https://code.google.com/p/vimwiki/src ~/.vim/bundle/vimwiki + +# Install notes plugin +rm -rf ~/.vim/bundle/notes +git clone https://github.com/vim-scripts/notes.vim.git ~/.vim/bundle/notes +
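
Note on the powerline change above: the bundled powerline, powerline-lint and powerline-zsh.py scripts are deleted, and update-home.sh now installs upstream powerline with pip --user, which typically places the powerline entry point in ~/.local/bin. The zsh configuration that consumes it is not part of this diff, so the snippet below is only a minimal sketch of a prompt hook calling the pip-installed client. The function name _powerline_precmd and the zsh_prompt renderer value are illustrative assumptions; the shell/left positional arguments and the -r and --last_exit_code flags are the ones accepted by the argparser in the deleted shell.py.

# Sketch only, not taken from this repository: rebuild PS1 before each prompt
# using the powerline client installed by `pip install --user`.
_powerline_precmd() {
    # `shell` is the ext positional, `left` the side, matching the deleted
    # shell.py parser; `-r zsh_prompt` (assumed) selects a zsh-escaped renderer.
    PS1="$(~/.local/bin/powerline shell left -r zsh_prompt --last_exit_code=$?)"
}
precmd_functions+=(_powerline_precmd)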