Jedi Autocomplete #3114
Changes from all commits
```
@@ -0,0 +1,64 @@
absl-py==1.3.0
astunparse==1.6.3
cachetools==5.2.0
certifi==2022.9.24
charset-normalizer==2.1.1
click==8.1.3
deephaven-plugin==0.3.0
flatbuffers==2.0.7
gast==0.4.0
google-auth==2.14.1
google-auth-oauthlib==0.4.6
google-pasta==0.2.0
grpcio==1.51.1
h5py==3.7.0
idna==3.4
importlib-metadata==5.1.0
java-utilities==0.2.0
jedi==0.18.2
joblib==1.2.0
jpy==0.13.0
keras==2.7.0
Keras-Preprocessing==1.1.2
libclang==14.0.6
llvmlite==0.39.1
Markdown==3.4.1
MarkupSafe==2.1.1
nltk==3.7
numba==0.56.4
numpy==1.21.6
nvidia-cublas-cu11==11.10.3.66
nvidia-cuda-nvrtc-cu11==11.7.99
nvidia-cuda-runtime-cu11==11.7.99
nvidia-cudnn-cu11==8.5.0.96
oauthlib==3.2.2
opt-einsum==3.3.0
pandas==1.3.5
parso==0.8.3
protobuf==3.19.6
pyasn1==0.4.8
pyasn1-modules==0.2.8
python-dateutil==2.8.2
pytz==2022.6
regex==2022.10.31
requests==2.28.1
requests-oauthlib==1.3.1
rsa==4.9
scikit-learn==1.0.2
scipy==1.7.3
six==1.16.0
tensorboard==2.11.0
tensorboard-data-server==0.6.1
tensorboard-plugin-wit==1.8.1
tensorflow==2.7.4
tensorflow-estimator==2.7.0
tensorflow-io-gcs-filesystem==0.28.0
termcolor==2.1.1
threadpoolctl==3.1.0
torch==1.13.0
tqdm==4.64.1
typing_extensions==4.4.0
urllib3==1.26.13
Werkzeug==2.2.2
wrapt==1.14.1
zipp==3.11.0
```
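Of these pins, jedi==0.18.2 and its parser parso==0.8.3 are what the autocomplete code in this PR depends on. A quick sanity check that those versions are importable, written as an illustrative sketch rather than part of the diff:

```python
# Illustrative sketch only (not part of this PR): confirm the jedi/parso
# versions pinned above are what is actually importable in the environment.
import jedi
import parso

print("jedi", jedi.__version__)    # expect 0.18.2 per the pin above
print("parso", parso.__version__)  # expect 0.8.3 per the pin above
assert jedi.__version__ == "0.18.2"
assert parso.__version__ == "0.8.3"
```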
```
@@ -0,0 +1,25 @@
#
```

Member: If this needs to be public, I would prefer it is called …
```
# Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending
#

""" This module allows the user to configure if and how we use jedi to perform autocompletion.
See https://github.com/davidhalter/jedi for information on jedi.
# To disable autocompletion
from deephaven.completer import jedi_settings
jedi_settings.mode = 'off'
Valid options for completer_mode are one of: [off, safe, strong].
off: do not use any autocomplete
safe mode: uses static analysis of source files. Can't execute any code.
strong mode: looks in your globals() for answers to autocomplete and analyzes your runtime python objects
later, we may add slow mode, which uses both static and interpreted completion modes.
"""

from deephaven.completer._completer import Completer
from jedi import preload_module, Interpreter

jedi_settings = Completer()
```
Member: Needs sphinx doc string.
```
# warm jedi up a little. We could probably off-thread this.
preload_module('deephaven')
Interpreter('', []).complete(1, 0)
```
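For reference, the workflow the module docstring describes, written out as a runnable sketch (it assumes the deephaven.completer package from this PR is importable; the mode names come straight from CompleterMode in the next file):

```python
# Sketch of the configuration flow documented above; not part of the diff.
from deephaven.completer import jedi_settings

jedi_settings.mode = 'off'     # disable autocompletion entirely
jedi_settings.mode = 'safe'    # static analysis only, never executes user code
jedi_settings.mode = 'strong'  # also complete against live globals()

print(jedi_settings.mode)      # prints "strong"; CompleterMode.__str__ returns the value
```

Plain strings work here because the mode setter in _completer.py converts them to CompleterMode members.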
```
@@ -0,0 +1,111 @@
# only python 3.8 needs this, but it must be the first expression in the file, so we can't predicate it
from __future__ import annotations
from enum import Enum
from typing import Any
from jedi import Interpreter, Script


class CompleterMode(Enum):
```

Member: Everything here needs appropriate docs.
```
    off = 'off'
    safe = 'safe'
    strong = 'strong'
```

Member, on lines +9 to +11: I assume these should be caps, since they are enum values.
```
    def __str__(self) -> str:
        return self.value


class Completer(object):

    def __init__(self):
        self._docs = {}
        self._versions = {}
        # we will replace this w/ top-level globals() when we open the document
        self.__scope = globals()
        # might want to make this a {uri: []} instead of []
        self.pending = []
        try:
            import jedi
            self.__can_jedi = True
            self.mode = CompleterMode.strong
        except ImportError:
            self.__can_jedi = False
            self.mode = CompleterMode.off

    @property
    def mode(self) -> CompleterMode:
        return self.__mode

    @mode.setter
    def mode(self, mode) -> None:
        if type(mode) == str:
            mode = CompleterMode[mode]
        self.__mode = mode

    def open_doc(self, text: str, uri: str, version: int) -> None:
        self._docs[uri] = text
        self._versions[uri] = version

    def get_doc(self, uri: str) -> str:
        return self._docs[uri]

    def update_doc(self, text: str, uri: str, version: int) -> None:
        self._docs[uri] = text
        self._versions[uri] = version
        # any pending completions should stop running now. We use a list of Event to signal any running threads to stop
        for pending in self.pending:
            pending.set()

    def close_doc(self, uri: str) -> None:
        del self._docs[uri]
        del self._versions[uri]
        for pending in self.pending:
            pending.set()
```
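Nothing in this diff actually creates the Events in `pending`; update_doc and close_doc only signal them. Purely as a sketch of how an off-thread completion worker might wire into that list (the helper name and callback are hypothetical, not part of the PR):

```python
# Hypothetical wiring, not part of this PR: run a completion on a worker
# thread and let update_doc()/close_doc() cancel it via the pending list.
import threading

def run_completion_async(completer, uri, version, line, col, on_done):
    cancel = threading.Event()
    completer.pending.append(cancel)

    def work():
        try:
            if not cancel.is_set():
                on_done(completer.do_completion(uri, version, line, col))
        finally:
            completer.pending.remove(cancel)

    threading.Thread(target=work, daemon=True).start()
    return cancel
```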
```
    def is_enabled(self) -> bool:
        return self.__mode != CompleterMode.off

    def can_jedi(self) -> bool:
        return self.__can_jedi

    def set_scope(self, scope: dict) -> None:
        self.__scope = scope

    def do_completion(self, uri: str, version: int, line: int, col: int) -> list[list[Any]]:
        if not self._versions[uri] == version:
            # if you aren't the newest completion, you get nothing, quickly
            return []

        # run jedi
        txt = self.get_doc(uri)
        # The Script completer is static analysis only, so we should actually be feeding it a whole document at once.

        completer = Script if self.__mode == CompleterMode.safe else Interpreter

        completions = completer(txt, [self.__scope]).complete(line, col)
        # for now, a simple sorting based on number of preceding _
        # we may want to apply additional sorting to each list before combining
        results: list = []
        results_: list = []
        results__: list = []
        for complete in completions:
            # keep checking the latest version as we run, so updated doc can cancel us
            if not self._versions[uri] == version:
                return []
            result: list = self.to_result(complete, col)
            if result[0].startswith('__'):
                results__.append(result)
            elif result[0].startswith('_'):
                results_.append(result)
            else:
                results.append(result)

        # put the results together in a better-than-nothing sorting
        return results + results_ + results__

    @staticmethod
    def to_result(complete: Any, col: int) -> list[Any]:
        name: str = complete.name
        prefix_length: int = complete.get_completion_prefix_length()
        start: int = col - prefix_length
        # all java needs to build a grpc response is completion text (name) and where the completion should start
        return [name, start]
```
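To make the document/version protocol above concrete, here is a small driving sketch (the uri, text, and positions are invented for illustration; the service that actually calls this lives on the Java/gRPC side and is not shown in this diff):

```python
# Illustrative sketch, not part of this PR: drive the Completer by hand.
from deephaven.completer._completer import Completer

completer = Completer()                      # defaults to strong mode when jedi imports
completer.set_scope(globals())               # strong mode completes against live globals()
completer.open_doc("import math\nmath.", uri="console-1", version=1)

if completer.is_enabled():
    # jedi positions: 1-based line, 0-based column (cursor just after the dot)
    for name, start in completer.do_completion("console-1", 1, line=2, col=5):
        print(name, start)                   # e.g. "sqrt 5"
```

Each result is just [name, start], matching the comment in to_result: the Java side only needs the completion text and the column the completion should start from.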
Comment: this is what I've been doing for community deployments. Does not really belong in this branch.

Comment: I'm going to leave it in unless someone complains. Just makes doing deployments that build deephaven-core easier / possible.