VR0(61Xuv8be`st-V@_G3vkD*AnS_o$jD
zzINXEcb()P>!8mO^S@05D*6u4SGafZe;58)aTgaCjsG08qbWIxD&MUy|FOrB3MCP2
z_Uh=4L-x2EeK_=NQlrP-`Q!Gg(-Ys1@_3i+{`pZ*T2JL5(Q;+0`;51MBV3*Q_f*XnVQQ9%pxV7tPw*S^_Au;L66vRMQ#9U@9sqiNfOIpv1Ja+lATL
z*(`t|u8|75Y^t=cInA3(Lw5N?zupsYKY@#e9Jm%qvWtSeGRJ$qP1B*DenGxSsk7my
zL7|~`3z0=`)dnAq7G1U{m8s?k;
_UJpK=uj2Gu
zHEow_qN1kW*7fo6ahI1L$~`zZp!XAXSgc(Jl=8f4Hvee_m*t?k0ULSfj=#2M>WJdc
z!Te7-3z4lOU^~{)xB9UU_nlWZ#jGY+(DYmxK*hK$eX{DQ8~k3`%)s;b28@(Ou@iJp
z)1%6iW?#E7H}?goDw!rIbkz(uI@^3
zN91TBMUh1wc))9kMIV3EDe%;6J=EthQ+_;bmk&NsNL1-`)4R7$RrLNQWdvdqYt*(w
ziVW41ky)L^OCbtPqmqm%-i(**`fqS_7Y^s;oz6b>PTl23Q}~;mc5Y0q@xL46k7ORJ7&kM|qpi2~r8{kZIn7KJ$UlF6+K6VeJd`LkdK(Gx6V*G5Irnm`G9<%g
z>N)SU1<;_aYZ4>b=)0|+FWQb!ROUq@+p-R3I^8m^FnepZwl#~P2+TJSu{*mb0rqAb
zyHv8edU^~V#1YVr%@hQR!TxAdjcs-;08_~ct
zck4t2jl#Wd^zpGfJR&rne_+J&${kFmz#>022&vz}mX=}-)-HwVE+C$wO2xiz7uCqV
zn~JL?HtIb`v$*fqjJe7DlN>kyfNI^Ylzlr
zItfs_-2(CBC&=gfS_$!VJ$H-BbvPdmJ*&ufu#t4kq*zi7`bhqiuUdwjQ7&q75xD@{
zBTI2~wXL=N1DwOtBoIRfZ@qGp7Rxjhk<^@&Ky=r4(igL>S_a?93x?`l5e@Tq)A@+L
zCht?G5{GK_+7$oB5LvJ2Xn)$;493DAYfE00U;DHrcQ5g~d+Eb8Ac
z;|l!b6jw)RDqFkT6iz&8g3Rl60!tsoqUf4WX3lPmXLf@`-x$h}zcXwQtV1#b
zw(HnW&3rsd1hp$DtW3%Jsxl9aZw^{c31{3}JEtt2NI=4WDV9x%Q?`^2`e1xMip@RH
zAYV$hyJZ|TXLhx#)sfIob=z^(45fo!`Znx{HMXaoCV;Zwm%s1axu2u)>GEB^!neCL
XG-VTWMgg0R;S^+4KU7P7{QUm`$cK!=
literal 0
HcmV?d00001
diff --git a/archive/classic_docs/code-execution/computer-api.mdx b/archive/classic_docs/code-execution/computer-api.mdx
new file mode 100644
index 0000000000..699192843c
--- /dev/null
+++ b/archive/classic_docs/code-execution/computer-api.mdx
@@ -0,0 +1,240 @@
+---
+title: Computer API
+---
+
+The following functions are designed for language models to use in Open Interpreter, currently only supported in [OS Mode](/guides/os-mode/).
+
+### Display - View
+
+Takes a screenshot of the primary display.
+
+
+
+```python
+interpreter.computer.display.view()
+```
+
+
+
+### Display - Center
+
+Gets the x, y value of the center of the screen.
+
+
+
+```python
+x, y = interpreter.computer.display.center()
+```
+
+
+
+### Keyboard - Hotkey
+
+Performs a hotkey on the computer.
+
+
+
+```python
+interpreter.computer.keyboard.hotkey(" ", "command")
+```
+
+
+
+### Keyboard - Write
+
+Writes the text into the currently focused window.
+
+
+
+```python
+interpreter.computer.keyboard.write("hello")
+```
+
+
+
+### Mouse - Click
+
+Clicks on the specified coordinates, or an icon, or text. If text is specified, OCR will be run on the screenshot to find the text coordinates and click on it.
+
+
+
+```python
+# Click on coordinates
+interpreter.computer.mouse.click(x=100, y=100)
+
+# Click on text on the screen
+interpreter.computer.mouse.click("Onscreen Text")
+
+# Click on a gear icon
+interpreter.computer.mouse.click(icon="gear icon")
+```
+
+
+
+### Mouse - Move
+
+Moves to the specified coordinates, or an icon, or text. If text is specified, OCR will be run on the screenshot to find the text coordinates and move to it.
+
+
+
+```python
+# Move to coordinates
+interpreter.computer.mouse.move(x=100, y=100)
+
+# Move to text on the screen
+interpreter.computer.mouse.move("Onscreen Text")
+
+# Move to a gear icon
+interpreter.computer.mouse.move(icon="gear icon")
+```
+
+
+
+### Mouse - Scroll
+
+Scrolls the mouse a specified number of pixels.
+
+
+
+```python
+# Scroll Down
+interpreter.computer.mouse.scroll(-10)
+
+# Scroll Up
+interpreter.computer.mouse.scroll(10)
+```
+
+
+
+### Clipboard - View
+
+Returns the contents of the clipboard.
+
+
+
+```python
+interpreter.computer.clipboard.view()
+```
+
+
+
+### OS - Get Selected Text
+
+Gets the selected text on the screen.
+
+
+
+```python
+interpreter.computer.os.get_selected_text()
+```
+
+
+
+### Mail - Get
+
+Retrieves the last `number` emails from the inbox, optionally filtering for only unread emails. (Mac only)
+
+
+
+```python
+interpreter.computer.mail.get(number=10, unread=True)
+```
+
+
+
+### Mail - Send
+
+Sends an email with the given parameters using the default mail app. (Mac only)
+
+
+
+```python
+interpreter.computer.mail.send("john@email.com", "Subject", "Body", ["path/to/attachment.pdf", "path/to/attachment2.pdf"])
+```
+
+
+
+### Mail - Unread Count
+
+Retrieves the count of unread emails in the inbox. (Mac only)
+
+
+
+```python
+interpreter.computer.mail.unread_count()
+```
+
+
+
+### SMS - Send
+
+Sends a text message using the default SMS app. (Mac only)
+
+
+
+```python
+interpreter.computer.sms.send("2068675309", "Hello from Open Interpreter!")
+```
+
+
+
+### Contacts - Get Phone Number
+
+Returns the phone number of a contact name. (Mac only)
+
+
+
+```python
+interpreter.computer.contacts.get_phone_number("John Doe")
+```
+
+
+
+### Contacts - Get Email Address
+
+Returns the email of a contact name. (Mac only)
+
+
+
+```python
+interpreter.computer.contacts.get_email_address("John Doe")
+```
+
+
+
+### Calendar - Get Events
+
+Fetches calendar events for the given date or date range from all calendars. (Mac only)
+
+
+
+```python
+interpreter.computer.calendar.get_events(start_date=datetime, end_date=datetime)
+```
+
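+In practice, `start_date` and `end_date` are `datetime` objects. A small illustrative sketch (the date range here is hypothetical):
+
+```python
+from datetime import datetime, timedelta
+
+# Fetch events for the next 24 hours
+start = datetime.now()
+interpreter.computer.calendar.get_events(start_date=start, end_date=start + timedelta(days=1))
+```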
+
+
+### Calendar - Create Event
+
+Creates a new calendar event. Uses the first calendar if none is specified. (Mac only)
+
+
+
+```python
+interpreter.computer.calendar.create_event(title="Title", start_date=datetime, end_date=datetime, location="Location", notes="Notes", calendar="Work")
+```
+
+
+
+### Calendar - Delete Event
+
+Deletes a specific calendar event. (Mac only)
+
+
+
+```python
+interpreter.computer.calendar.delete_event(event_title="Title", start_date=datetime, calendar="Work")
+```
+
+
+
diff --git a/archive/classic_docs/code-execution/custom-languages.mdx b/archive/classic_docs/code-execution/custom-languages.mdx
new file mode 100644
index 0000000000..9f342e5011
--- /dev/null
+++ b/archive/classic_docs/code-execution/custom-languages.mdx
@@ -0,0 +1,76 @@
+---
+title: Custom Languages
+---
+
+You can add or edit the programming languages that Open Interpreter's computer runs.
+
+In this example, we'll swap out the `python` language for a version of `python` that runs in the cloud. We'll use `E2B` to do this.
+
+([`E2B`](https://e2b.dev/) is a secure, sandboxed environment where you can run arbitrary code.)
+
+First, [get an API key here](https://e2b.dev/), and set it:
+
+```python
+import os
+os.environ["E2B_API_KEY"] = ""
+```
+
+Then, define a custom language for Open Interpreter. The class name doesn't matter, but we'll call it `PythonE2B`:
+
+```python
+from interpreter import interpreter
+import e2b
+
+class PythonE2B:
+    """
+    This class contains all requirements for being a custom language in Open Interpreter:
+
+    - name (an attribute)
+    - run (a method)
+    - stop (a method)
+    - terminate (a method)
+
+    You can use this class to run any language you know how to run, or edit any of the official languages (which also conform to this class).
+
+    Here, we'll use E2B to power the `run` method.
+    """
+
+    # This is the name that will appear to the LLM.
+    name = "python"
+
+    # Optionally, you can append some information about this language to the system message:
+    system_message = "# Follow this rule: Every Python code block MUST contain at least one print statement."
+
+    # (E2B isn't a Jupyter Notebook, so we added ^ this so it would print things,
+    # instead of putting variables at the end of code blocks, which is a Jupyter thing.)
+
+    def run(self, code):
+        """Generator that yields a dictionary in LMC Format."""
+
+        # Run the code on E2B
+        stdout, stderr = e2b.run_code('Python3', code)
+
+        # Yield the output
+        yield {
+            "type": "console", "format": "output",
+            "content": stdout + stderr  # We combined these arbitrarily. Yield anything you'd like!
+        }
+
+    def stop(self):
+        """Stops the code."""
+        # Not needed here, because e2b.run_code isn't stateful.
+        pass
+
+    def terminate(self):
+        """Terminates the entire process."""
+        # Not needed here, because e2b.run_code isn't stateful.
+        pass
+
+# (Tip: Do this before adding/removing languages, otherwise OI might retain the state of previous languages:)
+interpreter.computer.terminate()
+
+# Give Open Interpreter its languages. This will only let it run PythonE2B:
+interpreter.computer.languages = [PythonE2B]
+
+# Try it out!
+interpreter.chat("What's 349808*38490739?")
+```
\ No newline at end of file
diff --git a/archive/classic_docs/code-execution/settings.mdx b/archive/classic_docs/code-execution/settings.mdx
new file mode 100644
index 0000000000..0373d0af9e
--- /dev/null
+++ b/archive/classic_docs/code-execution/settings.mdx
@@ -0,0 +1,7 @@
+---
+title: Settings
+---
+
+The `interpreter.computer` is responsible for executing code.
+
+[Click here](https://docs.openinterpreter.com/settings/all-settings#computer) to view `interpreter.computer` settings.
diff --git a/archive/classic_docs/code-execution/usage.mdx b/archive/classic_docs/code-execution/usage.mdx
new file mode 100644
index 0000000000..9fe2b7542c
--- /dev/null
+++ b/archive/classic_docs/code-execution/usage.mdx
@@ -0,0 +1,36 @@
+---
+title: Usage
+---
+
+# Running Code
+
+The `computer` itself is separate from Open Interpreter's core, so you can run it independently:
+
+```python
+from interpreter import interpreter
+
+interpreter.computer.run("python", "print('Hello World!')")
+```
+
+This runs in the same Python instance that interpreter uses, so you can define functions, variables, or log in to services before the AI starts running code:
+
+```python
+interpreter.computer.run("python", "import replicate\nreplicate.api_key='...'")
+
+interpreter.custom_instructions = "Replicate has already been imported."
+
+interpreter.chat("Please generate an image on replicate...") # Interpreter will be logged into Replicate
+```
+
+# Custom Languages
+
+You also have control over the `computer`'s languages (like Python, JavaScript, and Shell), and can easily append custom languages:
+
+
+ Add or customize the programming languages that Open Interpreter can use.
+
\ No newline at end of file
diff --git a/interpreter/core/__init__.py b/archive/classic_docs/computer/custom-languages.mdx
similarity index 100%
rename from interpreter/core/__init__.py
rename to archive/classic_docs/computer/custom-languages.mdx
diff --git a/archive/classic_docs/computer/introduction.mdx b/archive/classic_docs/computer/introduction.mdx
new file mode 100644
index 0000000000..45f862c6bd
--- /dev/null
+++ b/archive/classic_docs/computer/introduction.mdx
@@ -0,0 +1,13 @@
+The Computer module is responsible for executing code.
+
+You can manually execute code in the same instance that Open Interpreter uses:
+
+```python
+interpreter.computer.run("python", "print('Hello World!')")
+```
+
+User Usage
+
+It also comes with a suite of modules that we think are particularly useful to code interpreting LLMs.
+
+LLM Usage
\ No newline at end of file
diff --git a/archive/classic_docs/computer/language-model-usage.mdx b/archive/classic_docs/computer/language-model-usage.mdx
new file mode 100644
index 0000000000..eb6abda160
--- /dev/null
+++ b/archive/classic_docs/computer/language-model-usage.mdx
@@ -0,0 +1,3 @@
+Open Interpreter can use the Computer module itself.
+
+Here's what it can do:
\ No newline at end of file
diff --git a/archive/classic_docs/computer/user-usage.mdx b/archive/classic_docs/computer/user-usage.mdx
new file mode 100644
index 0000000000..c879f7f82f
--- /dev/null
+++ b/archive/classic_docs/computer/user-usage.mdx
@@ -0,0 +1,5 @@
+The Computer module is responsible for running code.
+
+You can add custom languages to it.
+
+The user can add custom languages to the Computer and call `.run` on it to execute code.
\ No newline at end of file
diff --git a/archive/classic_docs/getting-started/introduction.mdx b/archive/classic_docs/getting-started/introduction.mdx
new file mode 100644
index 0000000000..8223b5e45c
--- /dev/null
+++ b/archive/classic_docs/getting-started/introduction.mdx
@@ -0,0 +1,44 @@
+---
+title: Introduction
+description: A new way to use computers
+---
+
+# Introduction
+
+
+
+**Open Interpreter** lets language models run code.
+
+You can chat with Open Interpreter through a ChatGPT-like interface in your terminal by running `interpreter` after installing.
+
+This provides a natural-language interface to your computer's general-purpose capabilities:
+
+- Create and edit photos, videos, PDFs, etc.
+- Control a Chrome browser to perform research
+- Plot, clean, and analyze large datasets
+- ...etc.
+
+
+
+You can also build Open Interpreter into your applications with [our Python package.](/usage/python/arguments)
+
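+Here's a minimal sketch of programmatic usage (see the Basic Usage guide for more):
+
+```python
+from interpreter import interpreter
+
+# Chat from Python instead of the terminal
+interpreter.chat("How many files are on my Desktop?")
+```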
+---
+
+## Quick start
+
+If you already use Python, you can install Open Interpreter via `pip`:
+
+
+
+```bash
+pip install open-interpreter
+```
+
+
+```bash
+interpreter
+```
+
+
+
+We've also developed [one-line installers](/getting-started/setup#experimental-one-line-installers) that install Python and set up Open Interpreter.
diff --git a/archive/classic_docs/getting-started/setup.mdx b/archive/classic_docs/getting-started/setup.mdx
new file mode 100644
index 0000000000..4ddd5c9772
--- /dev/null
+++ b/archive/classic_docs/getting-started/setup.mdx
@@ -0,0 +1,85 @@
+---
+title: Setup
+---
+
+
+
+## Installation from `pip`
+
+If you are familiar with Python, we recommend installing Open Interpreter via `pip`:
+
+```bash
+pip install open-interpreter
+```
+
+
+ You'll need Python
+ [3.10](https://www.python.org/downloads/release/python-3100/) or
+ [3.11](https://www.python.org/downloads/release/python-3110/). Run `python
+ --version` to check yours.
+
+It is recommended to install Open Interpreter in a [virtual
+environment](https://docs.python.org/3/library/venv.html).
+
+
+
+## Install optional dependencies from `pip`
+
+Open Interpreter has optional dependencies for different capabilities
+
+[Local Mode](/guides/running-locally) dependencies
+
+```bash
+pip install open-interpreter[local]
+```
+
+[OS Mode](/guides/os-mode) dependencies
+
+```bash
+pip install open-interpreter[os]
+```
+
+[Safe Mode](/safety/safe-mode) dependencies
+
+```bash
+pip install open-interpreter[safe]
+```
+
+Server dependencies
+
+```bash
+pip install open-interpreter[server]
+```
+
+## Experimental one-line installers
+
+To try our experimental installers, open your Terminal with admin privileges [(click here to learn how)](https://chat.openai.com/share/66672c0f-0935-4c16-ac96-75c1afe14fe3), then paste the following commands:
+
+
+
+```bash Mac
+curl -sL https://raw.githubusercontent.com/openinterpreter/open-interpreter/main/installers/oi-mac-installer.sh | bash
+```
+
+```powershell Windows
+iex "& {$(irm https://raw.githubusercontent.com/openinterpreter/open-interpreter/main/installers/oi-windows-installer-conda.ps1)}"
+```
+
+```bash Linux
+curl -sL https://raw.githubusercontent.com/openinterpreter/open-interpreter/main/installers/oi-linux-installer.sh | bash
+```
+
+
+
+These installers will attempt to download Python, set up an environment, and install Open Interpreter for you.
+
+## No Installation
+
+If configuring your computer environment is challenging, you can press the `,` key on the [GitHub page](https://github.com/OpenInterpreter/open-interpreter) to create a codespace. After a moment, you'll receive a cloud virtual machine environment with open-interpreter pre-installed. You can then start interacting with it directly and freely approve its execution of system commands without worrying about damaging your system.
diff --git a/archive/classic_docs/guides/advanced-terminal-usage.mdx b/archive/classic_docs/guides/advanced-terminal-usage.mdx
new file mode 100644
index 0000000000..0a46dea8e1
--- /dev/null
+++ b/archive/classic_docs/guides/advanced-terminal-usage.mdx
@@ -0,0 +1,17 @@
+---
+title: Advanced Terminal Usage
+---
+
+Magic commands can be used to control the interpreter's behavior in interactive mode:
+
+- `%% [shell commands, like ls or cd]`: Run commands in Open Interpreter's shell instance
+- `%verbose [true/false]`: Toggle verbose mode. Without arguments or with 'true', it enters verbose mode. With 'false', it exits verbose mode.
+- `%reset`: Reset the current session.
+- `%undo`: Removes the previous user message and the AI's response from the message history.
+- `%save_message [path]`: Saves messages to a specified JSON path. If no path is provided, it defaults to 'messages.json'.
+- `%load_message [path]`: Loads messages from a specified JSON path. If no path is provided, it defaults to 'messages.json'.
+- `%tokens [prompt]`: EXPERIMENTAL: Calculate the tokens used by the next request based on the current conversation's messages and estimate the cost of that request; optionally provide a prompt to also calculate the tokens used by that prompt and the total amount of tokens that will be sent with the next request.
+- `%info`: Show system and interpreter information.
+- `%help`: Show this help message.
+- `%jupyter`: Export the current session to a Jupyter notebook file (.ipynb) to the Downloads folder.
+- `%markdown [path]`: Export the conversation to a specified Markdown path. If no path is provided, it will be saved to the Downloads folder with a generated conversation name.
\ No newline at end of file
diff --git a/archive/classic_docs/guides/basic-usage.mdx b/archive/classic_docs/guides/basic-usage.mdx
new file mode 100644
index 0000000000..4080b5393d
--- /dev/null
+++ b/archive/classic_docs/guides/basic-usage.mdx
@@ -0,0 +1,163 @@
+---
+title: Basic Usage
+---
+
+
+
+
+ Try Open Interpreter without installing anything on your computer
+
+
+
+ An example implementation of Open Interpreter's streaming capabilities
+
+
+
+
+---
+
+### Interactive Chat
+
+To start an interactive chat in your terminal, either run `interpreter` from the command line or `interpreter.chat()` from a .py file.
+
+
+
+```shell Terminal
+interpreter
+```
+
+```python Python
+interpreter.chat()
+```
+
+
+
+---
+
+### Programmatic Chat
+
+For more precise control, you can pass messages directly to `.chat(message)` in Python:
+
+```python
+interpreter.chat("Add subtitles to all videos in /videos.")
+
+# ... Displays output in your terminal, completes task ...
+
+interpreter.chat("These look great but can you make the subtitles bigger?")
+
+# ...
+```
+
+---
+
+### Start a New Chat
+
+In your terminal, Open Interpreter behaves like ChatGPT and will not remember previous conversations. Simply run `interpreter` to start a new chat.
+
+In Python, Open Interpreter remembers conversation history. If you want to start fresh, you can reset it.
+
+
+
+```shell Terminal
+interpreter
+```
+
+```python Python
+interpreter.messages = []
+```
+
+
+
+---
+
+### Save and Restore Chats
+
+In your terminal, Open Interpreter will save previous conversations to `/Open Interpreter/conversations/`.
+
+You can resume any of them by running `--conversations`. Use your arrow keys to select one, then press `ENTER` to resume it.
+
+In Python, `interpreter.chat()` returns a List of messages, which can be used to resume a conversation with `interpreter.messages = messages`.
+
+
+
+```shell Terminal
+interpreter --conversations
+```
+
+```python Python
+# Save messages to 'messages'
+messages = interpreter.chat("My name is Killian.")
+
+# Reset interpreter ("Killian" will be forgotten)
+interpreter.messages = []
+
+# Resume chat from 'messages' ("Killian" will be remembered)
+interpreter.messages = messages
+```
+
+
+
+---
+
+### Configure Default Settings
+
+We save default settings to the `default.yaml` profile which can be opened and edited by running the following command:
+
+```shell
+interpreter --profiles
+```
+
+You can use this to set your default language model, system message (custom instructions), max budget, etc.
+
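+For instance, a hedged sketch of what `default.yaml` might contain (keys mirror the YAML Profile example in the Profiles guide; values are illustrative):
+
+```yaml
+llm:
+  model: "gpt-4o"        # Default language model
+  temperature: 0
+
+custom_instructions: ""  # Appended to the system message
+
+auto_run: False          # Ask before running code
+```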
+
+ **Note:** The Python library will also inherit settings from the default
+ profile file. You can change it by running `interpreter --profiles` and
+ editing `default.yaml`.
+
+
+---
+
+### Customize System Message
+
+In your terminal, modify the system message by [editing your configuration file as described here](#configure-default-settings).
+
+In Python, you can inspect and configure Open Interpreter's system message to extend its functionality, modify permissions, or give it more context.
+
+```python
+interpreter.system_message += """
+Run shell commands with -y so the user doesn't have to confirm them.
+"""
+print(interpreter.system_message)
+```
+
+---
+
+### Change your Language Model
+
+Open Interpreter uses [LiteLLM](https://docs.litellm.ai/docs/providers/) to connect to language models.
+
+You can change the model by setting the model parameter:
+
+```shell
+interpreter --model gpt-3.5-turbo
+interpreter --model claude-2
+interpreter --model command-nightly
+```
+
+In Python, set the model on the object:
+
+```python
+interpreter.llm.model = "gpt-3.5-turbo"
+```
+
+[Find the appropriate "model" string for your language model here.](https://docs.litellm.ai/docs/providers/)
diff --git a/archive/classic_docs/guides/demos.mdx b/archive/classic_docs/guides/demos.mdx
new file mode 100644
index 0000000000..290ae512e2
--- /dev/null
+++ b/archive/classic_docs/guides/demos.mdx
@@ -0,0 +1,59 @@
+---
+title: Demos
+---
+
+### Vision Mode
+
+#### Recreating a Tailwind Component
+
+Creating a dropdown menu in Tailwind from a single screenshot:
+
+
+
+#### Recreating the ChatGPT interface using GPT-4V:
+
+
+
+### OS Mode
+
+#### Playing Music
+
+Open Interpreter playing some Lofi using OS mode:
+
+
+
+#### Open Interpreter Chatting with Open Interpreter
+
+OS mode creating and chatting with a local instance of Open Interpreter:
+
+
+
+#### Controlling an Arduino
+
+Reading temperature and humidity from an Arudino:
+
+
+
+#### Music Creation
+
+OS mode using Logic Pro X to record a piano song and play it back:
+
+
+
+#### Generating images in Everart.ai
+
+Open Interpreter describing pictures it wants to make, then creating them using OS mode:
+
+
+
+#### Open Interpreter Conversing With ChatGPT
+
+OS mode has a conversation with ChatGPT and even asks it "What do you think about human/AI interaction?"
+
+
+
+#### Sending an Email with Gmail
+
+OS mode launches Safari, composes an email, and sends it:
+
+
diff --git a/archive/classic_docs/guides/multiple-instances.mdx b/archive/classic_docs/guides/multiple-instances.mdx
new file mode 100644
index 0000000000..4ff4db455d
--- /dev/null
+++ b/archive/classic_docs/guides/multiple-instances.mdx
@@ -0,0 +1,37 @@
+---
+title: Multiple Instances
+---
+
+To create multiple instances, use the base class, `OpenInterpreter`:
+
+```python
+from interpreter import OpenInterpreter
+
+agent_1 = OpenInterpreter()
+agent_1.system_message = "This is a separate instance."
+
+agent_2 = OpenInterpreter()
+agent_2.system_message = "This is yet another instance."
+```
+
+For fun, you could make these instances talk to each other:
+
+```python
+def swap_roles(messages):
+    for message in messages:
+        if message['role'] == 'user':
+            message['role'] = 'assistant'
+        elif message['role'] == 'assistant':
+            message['role'] = 'user'
+    return messages
+
+agents = [agent_1, agent_2]
+
+# Kick off the conversation
+messages = [{"role": "user", "message": "Hello!"}]
+
+while True:
+ for agent in agents:
+ messages = agent.chat(messages)
+ messages = swap_roles(messages)
+```
diff --git a/archive/classic_docs/guides/os-mode.mdx b/archive/classic_docs/guides/os-mode.mdx
new file mode 100644
index 0000000000..4054820dad
--- /dev/null
+++ b/archive/classic_docs/guides/os-mode.mdx
@@ -0,0 +1,17 @@
+---
+title: OS Mode
+---
+
+OS mode is a highly experimental mode that allows Open Interpreter to control the operating system visually through the mouse and keyboard. It provides a multimodal LLM like GPT-4V with the necessary tools to capture screenshots of the display and interact with on-screen elements such as text and icons. It will try to use the most direct method to achieve the goal, like using Spotlight on Mac to open applications, and using query parameters in the URL to open websites with additional information.
+
+OS mode is a work in progress. If you have any suggestions or experience issues, please reach out on our [Discord](https://discord.com/invite/6p3fD6rBVm).
+
+To enable OS Mode, run the interpreter with the `--os` flag:
+
+```bash
+interpreter --os
+```
+
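+If you're using the Python package, a rough equivalent is to set the OS-mode fields shown in the [Profiles](/guides/profiles) example (a sketch, not an exhaustive configuration):
+
+```python
+from interpreter import interpreter
+
+# Mirror the OS-mode profile settings
+interpreter.os = True
+interpreter.llm.supports_vision = True
+interpreter.llm.model = "gpt-4o"
+
+interpreter.chat()
+```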
+Please note that screen recording permissions must be enabled for your terminal application for OS mode to work properly.
+
+OS mode does not currently support multiple displays.
diff --git a/archive/classic_docs/guides/profiles.mdx b/archive/classic_docs/guides/profiles.mdx
new file mode 100644
index 0000000000..4474634601
--- /dev/null
+++ b/archive/classic_docs/guides/profiles.mdx
@@ -0,0 +1,69 @@
+---
+title: Profiles
+---
+
+
+
+Profiles are a powerful way to customize your instance of Open Interpreter.
+
+Profiles are Python files that configure Open Interpreter. A wide range of fields from the [model](/settings/all-settings#model-selection) to the [context window](/settings/all-settings#context-window) to the [message templates](/settings/all-settings#user-message-template) can be configured in a Profile. This allows you to save multiple variations of Open Interpreter to optimize for your specific use-cases.
+
+You can access your Profiles by running `interpreter --profiles`. This will open the directory where all of your Profiles are stored.
+
+If you want to make your own profile, start with the [Template Profile](https://github.com/OpenInterpreter/open-interpreter/blob/main/interpreter/terminal_interface/profiles/defaults/template_profile.py).
+
+To apply a Profile to an Open Interpreter session, run `interpreter --profile` followed by the name of the Profile file.
+
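+For example (the profile filename below is hypothetical):
+
+```bash
+# Open the profiles directory to see what's available
+interpreter --profiles
+
+# Apply a profile by its filename
+interpreter --profile my_profile.py
+```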
+# Example Python Profile
+
+```Python
+from interpreter import interpreter
+
+interpreter.os = True
+interpreter.llm.supports_vision = True
+
+interpreter.llm.model = "gpt-4o"
+
+interpreter.llm.supports_functions = True
+interpreter.llm.context_window = 110000
+interpreter.llm.max_tokens = 4096
+interpreter.auto_run = True
+interpreter.loop = True
+```
+
+# Example YAML Profile
+
+ Make sure YAML profile version is set to 0.2.5
+
+```YAML
+llm:
+  model: "gpt-4o"
+  temperature: 0
+ # api_key: ... # Your API key, if the API requires it
+ # api_base: ... # The URL where an OpenAI-compatible server is running to handle LLM API requests
+
+# Computer Settings
+computer:
+ import_computer_api: True # Gives OI a helpful Computer API designed for code interpreting language models
+
+# Custom Instructions
+custom_instructions: "" # This will be appended to the system message
+
+# General Configuration
+auto_run: False # If True, code will run without asking for confirmation
+offline: False # If True, will disable some online features like checking for updates
+
+version: 0.2.5 # Configuration file version (do not modify)
+```
+
+
+ There are many settings that can be configured. [See them all
+ here](/settings/all-settings)
+
diff --git a/archive/classic_docs/guides/running-locally.mdx b/archive/classic_docs/guides/running-locally.mdx
new file mode 100644
index 0000000000..43804fc896
--- /dev/null
+++ b/archive/classic_docs/guides/running-locally.mdx
@@ -0,0 +1,67 @@
+---
+title: Running Locally
+---
+
+Open Interpreter can be run fully locally.
+
+Users need to install software to run local LLMs. Open Interpreter supports multiple local model providers such as [Ollama](https://www.ollama.com/), [Llamafile](https://github.com/Mozilla-Ocho/llamafile), [Jan](https://jan.ai/), and [LM Studio](https://lmstudio.ai/).
+
+
+ Local models perform better with extra guidance and direction. You can improve
+ performance for your use-case by creating a new [Profile](/guides/profiles).
+
+
+## Terminal Usage
+
+### Local Explorer
+
+A Local Explorer was created to simplify the process of using OI locally. To access this menu, run the command `interpreter --local`.
+
+Select your chosen local model provider from the list of options.
+
+Most providers will require the user to state the model they are using. Provider-specific instructions are shown to the user in the menu.
+
+### Custom Local
+
+If you want to use a provider other than the ones listed, set the `--api_base` flag to point at a [custom endpoint](/language-models/local-models/custom-endpoint).
+
+You will also need to set the model by passing in the `--model` flag to select a [model](/settings/all-settings#model-selection).
+
+```bash
+interpreter --api_base "http://localhost:11434" --model ollama/codestral
+```
+
+
+ Other terminal flags are explained in [Settings](/settings/all-settings).
+
+
+## Python Usage
+
+In order to have a Python script use Open Interpreter locally, some fields need to be set:
+
+```python
+from interpreter import interpreter
+
+interpreter.offline = True
+interpreter.llm.model = "ollama/codestral"
+interpreter.llm.api_base = "http://localhost:11434"
+
+interpreter.chat("how many files are on my desktop?")
+```
+
+## Helpful settings for local models
+
+Local models benefit from more coercion and guidance, but adding that extra context to every message can disrupt the conversational experience of Open Interpreter. The following settings apply templates to messages, improving the steering of the language model while maintaining the natural flow of conversation.
+
+`interpreter.user_message_template` wraps the user's message in a template. This can be helpful for steering a language model toward a desired behavior without requiring the user to add extra context to their message.
+
+`interpreter.always_apply_user_message_template` wraps every user message in the template. If False, only the last user message will be wrapped.
+
+`interpreter.code_output_template` wraps the output from the computer after code is run. This can help with nudging the language model to continue working or to explain outputs.
+
+`interpreter.empty_code_output_template` is the message that is sent to the language model if code execution results in no output.
+
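+A hedged sketch of how these settings might be combined, assuming the templates use a `{content}` placeholder for the wrapped text (the template strings themselves are illustrative):
+
+```python
+from interpreter import interpreter
+
+# Wrap user messages to keep a local model on task
+interpreter.user_message_template = "{content} -- Reply with a short plan, then one code block."
+interpreter.always_apply_user_message_template = False  # Only the last user message is wrapped
+
+# Wrap code output so the model knows whether to keep going
+interpreter.code_output_template = "Code output: {content}\nWhat does this mean, and what's next?"
+interpreter.empty_code_output_template = "The code produced no output. Was that expected?"
+```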
+
+ Other configuration settings are explained in
+ [Settings](/settings/all-settings).
+
diff --git a/archive/classic_docs/guides/streaming-response.mdx b/archive/classic_docs/guides/streaming-response.mdx
new file mode 100644
index 0000000000..5b00a1279d
--- /dev/null
+++ b/archive/classic_docs/guides/streaming-response.mdx
@@ -0,0 +1,159 @@
+---
+title: Streaming Response
+---
+
+You can stream messages, code, and code outputs out of Open Interpreter by setting `stream=True` in an `interpreter.chat(message)` call.
+
+```python
+for chunk in interpreter.chat("What's 34/24?", stream=True, display=False):
+ print(chunk)
+```
+
+```
+{"role": "assistant", "type": "code", "format": "python", "start": True}
+{"role": "assistant", "type": "code", "format": "python", "content": "34"}
+{"role": "assistant", "type": "code", "format": "python", "content": " /"}
+{"role": "assistant", "type": "code", "format": "python", "content": " "}
+{"role": "assistant", "type": "code", "format": "python", "content": "24"}
+{"role": "assistant", "type": "code", "format": "python", "end": True}
+
+{"role": "computer", "type": "confirmation", "format": "execution", "content": {"type": "code", "format": "python", "content": "34 / 24"}},
+
+{"role": "computer", "type": "console", "start": True}
+{"role": "computer", "type": "console", "format": "active_line", "content": "1"}
+{"role": "computer", "type": "console", "format": "output", "content": "1.4166666666666667\n"}
+{"role": "computer", "type": "console", "format": "active_line", "content": None},
+{"role": "computer", "type": "console", "end": True}
+
+{"role": "assistant", "type": "message", "start": True}
+{"role": "assistant", "type": "message", "content": "The"}
+{"role": "assistant", "type": "message", "content": " result"}
+{"role": "assistant", "type": "message", "content": " of"}
+{"role": "assistant", "type": "message", "content": " the"}
+{"role": "assistant", "type": "message", "content": " division"}
+{"role": "assistant", "type": "message", "content": " "}
+{"role": "assistant", "type": "message", "content": "34"}
+{"role": "assistant", "type": "message", "content": "/"}
+{"role": "assistant", "type": "message", "content": "24"}
+{"role": "assistant", "type": "message", "content": " is"}
+{"role": "assistant", "type": "message", "content": " approximately"}
+{"role": "assistant", "type": "message", "content": " "}
+{"role": "assistant", "type": "message", "content": "1"}
+{"role": "assistant", "type": "message", "content": "."}
+{"role": "assistant", "type": "message", "content": "42"}
+{"role": "assistant", "type": "message", "content": "."}
+{"role": "assistant", "type": "message", "end": True}
+```
+
+**Note:** Setting `display=True` won't change the behavior of the streaming response; it will just render a display in your terminal.
+
+# Anatomy
+
+Each chunk of the streamed response is a dictionary that has a "role" key, which can be either "assistant" or "computer". The "type" key describes what the chunk is. The "content" key contains the actual content of the chunk.
+
+Every 'message' is made up of chunks, and begins with a "start" chunk, and ends with an "end" chunk. This helps you parse the streamed response into messages.
+
+Let's break down each part of the streamed response.
+
+## Code
+
+In this example, the LLM decided to start writing code first. It could have decided to write a message first, or to only write code, or to only write a message.
+
+Every streamed chunk of type "code" has a format key that specifies the language. In this case it decided to write `python`.
+
+This can be any language defined in [our languages directory.](https://github.com/OpenInterpreter/open-interpreter/tree/main/interpreter/core/computer/terminal/languages)
+
+```
+
+{"role": "assistant", "type": "code", "format": "python", "start": True}
+
+```
+
+Then, the LLM decided to write some code. The code is sent token-by-token:
+
+```
+
+{"role": "assistant", "type": "code", "format": "python", "content": "34"}
+{"role": "assistant", "type": "code", "format": "python", "content": " /"}
+{"role": "assistant", "type": "code", "format": "python", "content": " "}
+{"role": "assistant", "type": "code", "format": "python", "content": "24"}
+
+```
+
+When the LLM finishes writing code, it will send an "end" chunk:
+
+```
+
+{"role": "assistant", "type": "code", "format": "python", "end": True}
+
+```
+
+## Code Output
+
+After the LLM finishes writing a code block, Open Interpreter will attempt to run it.
+
+**Before** it runs it, the following chunk is sent:
+
+```
+
+{"role": "computer", "type": "confirmation", "format": "execution", "content": {"type": "code", "language": "python", "code": "34 / 24"}}
+
+```
+
+If you check for this object, you can break (or get confirmation) **before** executing the code.
+
+```python
+# This example asks the user before running code
+
+for chunk in interpreter.chat("What's 34/24?", stream=True):
+    # Check for the confirmation chunk that precedes code execution
+    if chunk.get("type") == "confirmation":
+        if input("Press ENTER to run this code.") != "":
+            break
+```
+
+**While** the code is being executed, you'll receive the line of code that's being run:
+
+```
+{"role": "computer", "type": "console", "format": "active_line", "content": "1"}
+```
+
+We use this to highlight the active line of code on our UI, which keeps the user aware of what Open Interpreter is doing.
+
+You'll then receive its output, if it produces any:
+
+```
+{"role": "computer", "type": "console", "format": "output", "content": "1.4166666666666667\n"}
+```
+
+When the code is **finished** executing, this flag will be sent:
+
+```
+{"role": "computer", "type": "console", "end": True}
+```
+
+## Message
+
+Finally, the LLM decided to write a message. This is streamed token-by-token as well:
+
+```
+{"role": "assistant", "type": "message", "start": True}
+{"role": "assistant", "type": "message", "content": "The"}
+{"role": "assistant", "type": "message", "content": " result"}
+{"role": "assistant", "type": "message", "content": " of"}
+{"role": "assistant", "type": "message", "content": " the"}
+{"role": "assistant", "type": "message", "content": " division"}
+{"role": "assistant", "type": "message", "content": " "}
+{"role": "assistant", "type": "message", "content": "34"}
+{"role": "assistant", "type": "message", "content": "/"}
+{"role": "assistant", "type": "message", "content": "24"}
+{"role": "assistant", "type": "message", "content": " is"}
+{"role": "assistant", "type": "message", "content": " approximately"}
+{"role": "assistant", "type": "message", "content": " "}
+{"role": "assistant", "type": "message", "content": "1"}
+{"role": "assistant", "type": "message", "content": "."}
+{"role": "assistant", "type": "message", "content": "42"}
+{"role": "assistant", "type": "message", "content": "."}
+{"role": "assistant", "type": "message", "end": True}
+```
+
+For an example in JavaScript on how you might process these streamed chunks, see the [migration guide](https://github.com/OpenInterpreter/open-interpreter/blob/main/docs/NCU_MIGRATION_GUIDE.md).
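+
+Here's a minimal Python sketch of one way to reassemble these chunks into whole messages using the "start"/"end" flags described above (the accumulation logic is illustrative, not the library's internals):
+
+```python
+from interpreter import interpreter
+
+messages = []
+current = None
+
+for chunk in interpreter.chat("What's 34/24?", stream=True, display=False):
+    if chunk.get("start"):
+        # A new message begins: remember its role/type/format
+        current = {k: v for k, v in chunk.items() if k != "start"}
+        current["content"] = ""
+    elif chunk.get("end"):
+        # The message is complete
+        messages.append(current)
+        current = None
+    elif current is not None and isinstance(chunk.get("content"), str):
+        if chunk.get("format") == "active_line":
+            continue  # Skip line-highlighting chunks
+        # Accumulate streamed content token-by-token
+        current["content"] += chunk["content"]
+
+print(messages)
+```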
diff --git a/archive/classic_docs/integrations/docker.mdx b/archive/classic_docs/integrations/docker.mdx
new file mode 100644
index 0000000000..7178b90208
--- /dev/null
+++ b/archive/classic_docs/integrations/docker.mdx
@@ -0,0 +1,64 @@
+---
+title: Docker
+---
+
+Docker support is currently experimental. Running Open Interpreter inside of a Docker container may not function as you expect. Let us know on [Discord](https://discord.com/invite/6p3fD6rBVm) if you encounter errors or have suggestions to improve Docker support.
+
+We are working on an official integration for Docker in the coming weeks. For now, you can use Open Interpreter in a sandboxed Docker container environment using the following steps:
+
+1. If you do not have Docker Desktop installed, [install it](https://www.docker.com/products/docker-desktop) before proceeding.
+
+2. Create a new directory and add a file named `Dockerfile` in it with the following contents:
+
+```dockerfile
+# Start with Python 3.11
+FROM python:3.11
+
+# Replace with your own key
+ENV OPENAI_API_KEY=your_api_key_here
+
+# Install Open Interpreter
+RUN pip install open-interpreter
+```
+
+3. Run the following commands in the same directory to start Open Interpreter.
+
+```bash
+docker build -t openinterpreter .
+docker run -d -it --name interpreter-instance openinterpreter interpreter
+docker attach interpreter-instance
+```
+
+## Mounting Volumes
+
+This is how you let it access _some_ of your files: you tell it a folder (a volume) that it will be able to see and manipulate.
+
+To mount a volume, you can use the `-v` flag followed by the path to the directory on your host machine, a colon, and then the path where you want to mount the directory in the container.
+
+```bash
+docker run -d -it -v /path/on/your/host:/path/in/the/container --name interpreter-instance openinterpreter interpreter
+```
+
+Replace `/path/on/your/host` with the path to the directory on your host machine that you want to mount, and replace `/path/in/the/container` with the path in the Docker container where you want to mount the directory.
+
+Here's a simple example:
+
+```bash
+docker run -d -it -v $(pwd):/files --name interpreter-instance openinterpreter interpreter
+```
+
+In this example, `$(pwd)` is your current directory, and it is mounted to a `/files` directory in the Docker container (this creates that folder too).
+
+## Flags
+
+To add flags to the command, just append them after `interpreter`. For example, to run the interpreter with custom instructions, run the following command:
+
+```bash
+docker-compose run --rm oi interpreter --custom_instructions "Be as concise as possible"
+```
+
+Please note that some flags will not work. For example, `--config` will not work, because it cannot open the config file in the container. If you want to use a config file other than the default, you can create a `config.yml` file inside of the same directory, add your custom config, and then run the following command:
+
+```bash
+docker-compose run --rm oi interpreter --config_file config.yml
+```
\ No newline at end of file
diff --git a/archive/classic_docs/integrations/e2b.mdx b/archive/classic_docs/integrations/e2b.mdx
new file mode 100644
index 0000000000..a57be74094
--- /dev/null
+++ b/archive/classic_docs/integrations/e2b.mdx
@@ -0,0 +1,72 @@
+---
+title: E2B
+---
+
+[E2B](https://e2b.dev/) is a secure, sandboxed environment where you can run arbitrary code.
+
+To build this integration, you just need to replace Open Interpreter's `python` (which runs locally) with a `python` that runs on E2B.
+
+First, [get an API key here](https://e2b.dev/), and set it:
+
+```python
+import os
+os.environ["E2B_API_KEY"] = ""
+```
+
+Then, define a custom language for Open Interpreter. The class name doesn't matter, but we'll call it `PythonE2B`:
+
+```python
+from interpreter import interpreter
+import e2b
+
+class PythonE2B:
+    """
+    This class contains all requirements for being a custom language in Open Interpreter:
+
+    - name (an attribute)
+    - run (a method)
+    - stop (a method)
+    - terminate (a method)
+
+    Here, we'll use E2B to power the `run` method.
+    """
+
+    # This is the name that will appear to the LLM.
+    name = "python"
+
+    # Optionally, you can append some information about this language to the system message:
+    system_message = "# Follow this rule: Every Python code block MUST contain at least one print statement."
+
+    # (E2B isn't a Jupyter Notebook, so we added ^ this so it would print things,
+    # instead of putting variables at the end of code blocks, which is a Jupyter thing.)
+
+    def run(self, code):
+        """Generator that yields a dictionary in LMC Format."""
+
+        # Run the code on E2B
+        stdout, stderr = e2b.run_code('Python3', code)
+
+        # Yield the output
+        yield {
+            "type": "console", "format": "output",
+            "content": stdout + stderr  # We combined these arbitrarily. Yield anything you'd like!
+        }
+
+    def stop(self):
+        """Stops the code."""
+        # Not needed here, because e2b.run_code isn't stateful.
+        pass
+
+    def terminate(self):
+        """Terminates the entire process."""
+        # Not needed here, because e2b.run_code isn't stateful.
+        pass
+
+# (Tip: Do this before adding/removing languages, otherwise OI might retain the state of previous languages:)
+interpreter.computer.terminate()
+
+# Give Open Interpreter its languages. This will only let it run PythonE2B:
+interpreter.computer.languages = [PythonE2B]
+
+# Try it out!
+interpreter.chat("What's 349808*38490739?")
+```
\ No newline at end of file
diff --git a/archive/classic_docs/language-models/custom-models.mdx b/archive/classic_docs/language-models/custom-models.mdx
new file mode 100644
index 0000000000..b48f17db91
--- /dev/null
+++ b/archive/classic_docs/language-models/custom-models.mdx
@@ -0,0 +1,41 @@
+---
+title: Custom Models
+---
+
+In addition to hosted and local language models, Open Interpreter also supports custom models.
+
+As long as your system can accept an input and stream an output (and can be interacted with via a Python generator) it can be used as a language model in Open Interpreter.
+
+Simply replace the OpenAI-compatible `completions` function in your language model with one of your own:
+
+```python
+from interpreter import interpreter
+
+def custom_language_model(messages, model, stream, max_tokens):
+    """
+    OpenAI-compatible completions function (this one just echoes what the user said back).
+    To make it OpenAI-compatible and parsable, `choices` has to be the root property.
+    The property `delta` is used to signify streaming.
+    """
+    users_content = messages[-1].get("content")  # Get last message's content
+
+    for character in users_content:
+        yield {"choices": [{"delta": {"content": character}}]}
+
+# Tell Open Interpreter to power the language model with this function
+
+interpreter.llm.completions = custom_language_model
+```
+
+Then, set the following settings:
+
+```python
+interpreter.llm.context_window = 2000 # In tokens
+interpreter.llm.max_tokens = 1000 # In tokens
+interpreter.llm.supports_vision = False # Does this completions endpoint accept images?
+interpreter.llm.supports_functions = False # Does this completions endpoint accept/return function calls?
+```
+
+And start using it:
+
+```python
+interpreter.chat("Hi!") # Returns/displays "Hi!" character by character
+```
diff --git a/archive/classic_docs/language-models/hosted-models/ai21.mdx b/archive/classic_docs/language-models/hosted-models/ai21.mdx
new file mode 100644
index 0000000000..9a9496327b
--- /dev/null
+++ b/archive/classic_docs/language-models/hosted-models/ai21.mdx
@@ -0,0 +1,48 @@
+---
+title: AI21
+---
+
+To use Open Interpreter with a model from AI21, set the `model` flag:
+
+
+
+```bash Terminal
+interpreter --model j2-light
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.model = "j2-light"
+interpreter.chat()
+```
+
+
+
+# Supported Models
+
+We support any model from [AI21:](https://www.ai21.com/)
+
+
+
+```bash Terminal
+interpreter --model j2-light
+interpreter --model j2-mid
+interpreter --model j2-ultra
+```
+
+```python Python
+interpreter.llm.model = "j2-light"
+interpreter.llm.model = "j2-mid"
+interpreter.llm.model = "j2-ultra"
+```
+
+
+
+# Required Environment Variables
+
+Set the following environment variables [(click here to learn how)](https://chat.openai.com/share/1062cdd8-62a1-4aa8-8ec9-eca45645971a) to use these models.
+
+| Environment Variable | Description | Where to Find |
+| --------------------- | ------------ | -------------- |
+| `AI21_API_KEY` | The API key for authenticating to AI21's services. | [AI21 Account Page](https://www.ai21.com/account/api-keys) |
\ No newline at end of file
diff --git a/archive/classic_docs/language-models/hosted-models/anthropic.mdx b/archive/classic_docs/language-models/hosted-models/anthropic.mdx
new file mode 100644
index 0000000000..283540f8df
--- /dev/null
+++ b/archive/classic_docs/language-models/hosted-models/anthropic.mdx
@@ -0,0 +1,48 @@
+---
+title: Anthropic
+---
+
+To use Open Interpreter with a model from Anthropic, set the `model` flag:
+
+
+
+```bash Terminal
+interpreter --model claude-instant-1
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.model = "claude-instant-1"
+interpreter.chat()
+```
+
+
+
+# Supported Models
+
+We support any model from [Anthropic:](https://www.anthropic.com/)
+
+
+
+```bash Terminal
+interpreter --model claude-instant-1
+interpreter --model claude-instant-1.2
+interpreter --model claude-2
+```
+
+```python Python
+interpreter.llm.model = "claude-instant-1"
+interpreter.llm.model = "claude-instant-1.2"
+interpreter.llm.model = "claude-2"
+```
+
+
+
+# Required Environment Variables
+
+Set the following environment variables [(click here to learn how)](https://chat.openai.com/share/1062cdd8-62a1-4aa8-8ec9-eca45645971a) to use these models.
+
+| Environment Variable | Description | Where to Find |
+| --------------------- | ------------ | -------------- |
+| `ANTHROPIC_API_KEY` | The API key for authenticating to Anthropic's services. | [Anthropic](https://www.anthropic.com/) |
\ No newline at end of file
diff --git a/archive/classic_docs/language-models/hosted-models/anyscale.mdx b/archive/classic_docs/language-models/hosted-models/anyscale.mdx
new file mode 100644
index 0000000000..0338a6634f
--- /dev/null
+++ b/archive/classic_docs/language-models/hosted-models/anyscale.mdx
@@ -0,0 +1,60 @@
+---
+title: Anyscale
+---
+
+To use Open Interpreter with a model from Anyscale, set the `model` flag:
+
+
+
+```bash Terminal
+interpreter --model anyscale/
+```
+
+```python Python
+from interpreter import interpreter
+
+# Set the model to use from Anyscale:
+interpreter.llm.model = "anyscale/"
+interpreter.chat()
+```
+
+
+
+# Supported Models
+
+We support the following completion models from Anyscale:
+
+- Llama 2 7B Chat
+- Llama 2 13B Chat
+- Llama 2 70B Chat
+- Mistral 7B Instruct
+- CodeLlama 34b Instruct
+
+
+
+```bash Terminal
+interpreter --model anyscale/meta-llama/Llama-2-7b-chat-hf
+interpreter --model anyscale/meta-llama/Llama-2-13b-chat-hf
+interpreter --model anyscale/meta-llama/Llama-2-70b-chat-hf
+interpreter --model anyscale/mistralai/Mistral-7B-Instruct-v0.1
+interpreter --model anyscale/codellama/CodeLlama-34b-Instruct-hf
+```
+
+```python Python
+interpreter.llm.model = "anyscale/meta-llama/Llama-2-7b-chat-hf"
+interpreter.llm.model = "anyscale/meta-llama/Llama-2-13b-chat-hf"
+interpreter.llm.model = "anyscale/meta-llama/Llama-2-70b-chat-hf"
+interpreter.llm.model = "anyscale/mistralai/Mistral-7B-Instruct-v0.1"
+interpreter.llm.model = "anyscale/codellama/CodeLlama-34b-Instruct-hf"
+
+```
+
+
+
+# Required Environment Variables
+
+Set the following environment variables [(click here to learn how)](https://chat.openai.com/share/1062cdd8-62a1-4aa8-8ec9-eca45645971a) to use these models.
+
+| Environment Variable | Description | Where to Find |
+| -------------------- | -------------------------------------- | --------------------------------------------------------------------------- |
+| `ANYSCALE_API_KEY` | The API key for your Anyscale account. | [Anyscale Account Settings](https://app.endpoints.anyscale.com/credentials) |
diff --git a/archive/classic_docs/language-models/hosted-models/aws-sagemaker.mdx b/archive/classic_docs/language-models/hosted-models/aws-sagemaker.mdx
new file mode 100644
index 0000000000..88205ef83a
--- /dev/null
+++ b/archive/classic_docs/language-models/hosted-models/aws-sagemaker.mdx
@@ -0,0 +1,70 @@
+---
+title: AWS Sagemaker
+---
+
+To use Open Interpreter with a model from AWS Sagemaker, set the `model` flag:
+
+
+
+```bash Terminal
+interpreter --model sagemaker/
+```
+
+```python Python
+# Sagemaker requires boto3 to be installed on your machine:
+!pip install boto3
+
+from interpreter import interpreter
+
+interpreter.llm.model = "sagemaker/"
+interpreter.chat()
+```
+
+
+
+# Supported Models
+
+We support the following completion models from AWS Sagemaker:
+
+- Meta Llama 2 7B
+- Meta Llama 2 7B (Chat/Fine-tuned)
+- Meta Llama 2 13B
+- Meta Llama 2 13B (Chat/Fine-tuned)
+- Meta Llama 2 70B
+- Meta Llama 2 70B (Chat/Fine-tuned)
+- Your Custom Huggingface Model
+
+
+
+```bash Terminal
+
+interpreter --model sagemaker/jumpstart-dft-meta-textgeneration-llama-2-7b
+interpreter --model sagemaker/jumpstart-dft-meta-textgeneration-llama-2-7b-f
+interpreter --model sagemaker/jumpstart-dft-meta-textgeneration-llama-2-13b
+interpreter --model sagemaker/jumpstart-dft-meta-textgeneration-llama-2-13b-f
+interpreter --model sagemaker/jumpstart-dft-meta-textgeneration-llama-2-70b
+interpreter --model sagemaker/jumpstart-dft-meta-textgeneration-llama-2-70b-b-f
+interpreter --model sagemaker/
+```
+
+```python Python
+interpreter.llm.model = "sagemaker/jumpstart-dft-meta-textgeneration-llama-2-7b"
+interpreter.llm.model = "sagemaker/jumpstart-dft-meta-textgeneration-llama-2-7b-f"
+interpreter.llm.model = "sagemaker/jumpstart-dft-meta-textgeneration-llama-2-13b"
+interpreter.llm.model = "sagemaker/jumpstart-dft-meta-textgeneration-llama-2-13b-f"
+interpreter.llm.model = "sagemaker/jumpstart-dft-meta-textgeneration-llama-2-70b"
+interpreter.llm.model = "sagemaker/jumpstart-dft-meta-textgeneration-llama-2-70b-b-f"
+interpreter.llm.model = "sagemaker/"
+```
+
+
+
+# Required Environment Variables
+
+Set the following environment variables [(click here to learn how)](https://chat.openai.com/share/1062cdd8-62a1-4aa8-8ec9-eca45645971a) to use these models.
+
+| Environment Variable | Description | Where to Find |
+| ----------------------- | ----------------------------------------------- | ----------------------------------------------------------------------------------- |
+| `AWS_ACCESS_KEY_ID` | The API access key for your AWS account. | [AWS Account Overview -> Security Credentials](https://console.aws.amazon.com/) |
+| `AWS_SECRET_ACCESS_KEY` | The API secret access key for your AWS account. | [AWS Account Overview -> Security Credentials](https://console.aws.amazon.com/) |
+| `AWS_REGION_NAME` | The AWS region you want to use | [AWS Account Overview -> Navigation bar -> Region](https://console.aws.amazon.com/) |
diff --git a/archive/classic_docs/language-models/hosted-models/azure.mdx b/archive/classic_docs/language-models/hosted-models/azure.mdx
new file mode 100644
index 0000000000..289c42f8ad
--- /dev/null
+++ b/archive/classic_docs/language-models/hosted-models/azure.mdx
@@ -0,0 +1,30 @@
+---
+title: Azure
+---
+
+To use a model from Azure, set the `model` flag to begin with `azure/`:
+
+
+
+```bash Terminal
+interpreter --model azure/
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.model = "azure/"
+interpreter.chat()
+```
+
+
+
+# Required Environment Variables
+
+Set the following environment variables [(click here to learn how)](https://chat.openai.com/share/1062cdd8-62a1-4aa8-8ec9-eca45645971a) to use these models.
+
+| Environment Variable | Description | Where to Find |
+| --------------------- | ------------ | -------------- |
+| `AZURE_API_KEY` | The API key for authenticating to Azure's services. | [Azure Account Page](https://portal.azure.com/#blade/Microsoft_AAD_IAM/ActiveDirectoryMenuBlade/RegisteredApps) |
+| `AZURE_API_BASE` | The base URL for Azure's services. | [Azure Account Page](https://portal.azure.com/#blade/Microsoft_AAD_IAM/ActiveDirectoryMenuBlade/RegisteredApps) |
+| `AZURE_API_VERSION` | The version of Azure's services. | [Azure Account Page](https://portal.azure.com/#blade/Microsoft_AAD_IAM/ActiveDirectoryMenuBlade/RegisteredApps) |
\ No newline at end of file
diff --git a/archive/classic_docs/language-models/hosted-models/baseten.mdx b/archive/classic_docs/language-models/hosted-models/baseten.mdx
new file mode 100644
index 0000000000..45ce940002
--- /dev/null
+++ b/archive/classic_docs/language-models/hosted-models/baseten.mdx
@@ -0,0 +1,57 @@
+---
+title: Baseten
+---
+
+To use Open Interpreter with Baseten, set the `model` flag:
+
+
+
+```bash Terminal
+interpreter --model baseten/
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.model = "baseten/"
+interpreter.chat()
+```
+
+
+
+# Supported Models
+
+We support the following completion models from Baseten:
+
+- Falcon 7b (qvv0xeq)
+- Wizard LM (q841o8w)
+- MPT 7b Base (31dxrj3)
+
+
+
+```bash Terminal
+
+interpreter --model baseten/qvv0xeq
+interpreter --model baseten/q841o8w
+interpreter --model baseten/31dxrj3
+
+
+```
+
+```python Python
+interpreter.llm.model = "baseten/qvv0xeq"
+interpreter.llm.model = "baseten/q841o8w"
+interpreter.llm.model = "baseten/31dxrj3"
+
+
+```
+
+
+
+# Required Environment Variables
+
+Set the following environment variables [(click here to learn how)](https://chat.openai.com/share/1062cdd8-62a1-4aa8-8ec9-eca45645971a) to use these models.
+
+| Environment Variable | Description | Where to Find |
+| -------------------- | --------------- | -------------------------------------------------------------------------------------------------------- |
+| `BASETEN_API_KEY`    | Baseten API key | [Baseten Dashboard -> Settings -> Account -> API Keys](https://app.baseten.co/settings/account/api_keys)  |
diff --git a/archive/classic_docs/language-models/hosted-models/cloudflare.mdx b/archive/classic_docs/language-models/hosted-models/cloudflare.mdx
new file mode 100644
index 0000000000..765079c3e9
--- /dev/null
+++ b/archive/classic_docs/language-models/hosted-models/cloudflare.mdx
@@ -0,0 +1,59 @@
+---
+title: Cloudflare Workers AI
+---
+
+To use Open Interpreter with the Cloudflare Workers AI API, set the `model` flag:
+
+
+
+```bash Terminal
+interpreter --model cloudflare/
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.model = "cloudflare/"
+interpreter.chat()
+```
+
+
+
+# Supported Models
+
+We support the following completion models from Cloudflare Workers AI:
+
+- Llama-2 7b chat fp16
+- Llama-2 7b chat int8
+- Mistral 7b instruct v0.1
+- CodeLlama 7b instruct awq
+
+
+
+```bash Terminal
+
+interpreter --model cloudflare/@cf/meta/llama-2-7b-chat-fp16
+interpreter --model cloudflare/@cf/meta/llama-2-7b-chat-int8
+interpreter --model cloudflare/@cf/mistral/mistral-7b-instruct-v0.1
+interpreter --model cloudflare/@hf/thebloke/codellama-7b-instruct-awq
+
+```
+
+```python Python
+interpreter.llm.model = "cloudflare/@cf/meta/llama-2-7b-chat-fp16"
+interpreter.llm.model = "cloudflare/@cf/meta/llama-2-7b-chat-int8"
+interpreter.llm.model = "@cf/mistral/mistral-7b-instruct-v0.1"
+interpreter.llm.model = "@hf/thebloke/codellama-7b-instruct-awq"
+
+```
+
+
+
+# Required Environment Variables
+
+Set the following environment variables [(click here to learn how)](https://chat.openai.com/share/1062cdd8-62a1-4aa8-8ec9-eca45645971a) to use these models.
+
+| Environment Variable | Description | Where to Find |
+| ----------------------- | -------------------------- | ---------------------------------------------------------------------------------------------- |
+| `CLOUDFLARE_API_KEY`    | Cloudflare API key         | [Cloudflare Profile Page -> API Tokens](https://dash.cloudflare.com/profile/api-tokens)         |
+| `CLOUDFLARE_ACCOUNT_ID` | Your Cloudflare account ID | [Cloudflare Dashboard -> Grab the Account ID from the url like: https://dash.cloudflare.com/{CLOUDFLARE_ACCOUNT_ID}?account= ](https://dash.cloudflare.com/) |
diff --git a/archive/classic_docs/language-models/hosted-models/cohere.mdx b/archive/classic_docs/language-models/hosted-models/cohere.mdx
new file mode 100644
index 0000000000..e0c7573278
--- /dev/null
+++ b/archive/classic_docs/language-models/hosted-models/cohere.mdx
@@ -0,0 +1,54 @@
+---
+title: Cohere
+---
+
+To use Open Interpreter with a model from Cohere, set the `model` flag:
+
+
+
+```bash Terminal
+interpreter --model command-nightly
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.model = "command-nightly"
+interpreter.chat()
+```
+
+
+
+# Supported Models
+
+We support any model on [Cohere's models page](https://www.cohere.ai/models):
+
+
+
+```bash Terminal
+interpreter --model command
+interpreter --model command-light
+interpreter --model command-medium
+interpreter --model command-medium-beta
+interpreter --model command-xlarge-beta
+interpreter --model command-nightly
+```
+
+```python Python
+interpreter.llm.model = "command"
+interpreter.llm.model = "command-light"
+interpreter.llm.model = "command-medium"
+interpreter.llm.model = "command-medium-beta"
+interpreter.llm.model = "command-xlarge-beta"
+interpreter.llm.model = "command-nightly"
+```
+
+
+
+# Required Environment Variables
+
+Set the following environment variables [(click here to learn how)](https://chat.openai.com/share/1062cdd8-62a1-4aa8-8ec9-eca45645971a) to use these models.
+
+| Environment Variable | Description | Where to Find |
+| --------------------- | ------------ | -------------- |
+| `COHERE_API_KEY` | The API key for authenticating to Cohere's services. | [Cohere Account Page](https://app.cohere.ai/login) |
\ No newline at end of file
diff --git a/archive/classic_docs/language-models/hosted-models/deepinfra.mdx b/archive/classic_docs/language-models/hosted-models/deepinfra.mdx
new file mode 100644
index 0000000000..1b56f10025
--- /dev/null
+++ b/archive/classic_docs/language-models/hosted-models/deepinfra.mdx
@@ -0,0 +1,64 @@
+---
+title: DeepInfra
+---
+
+To use Open Interpreter with DeepInfra, set the `model` flag:
+
+
+
+```bash Terminal
+interpreter --model deepinfra/
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.model = "deepinfra/"
+interpreter.chat()
+```
+
+
+
+# Supported Models
+
+We support the following completion models from DeepInfra:
+
+- Llama-2 70b chat hf
+- Llama-2 7b chat hf
+- Llama-2 13b chat hf
+- CodeLlama 34b instruct hf
+- Mistral 7b instruct v0.1
+- jondurbin/airoboros l2 70b gpt4 1.4.1
+
+
+
+```bash Terminal
+
+interpreter --model deepinfra/meta-llama/Llama-2-70b-chat-hf
+interpreter --model deepinfra/meta-llama/Llama-2-7b-chat-hf
+interpreter --model deepinfra/meta-llama/Llama-2-13b-chat-hf
+interpreter --model deepinfra/codellama/CodeLlama-34b-Instruct-hf
+interpreter --model deepinfra/mistral/mistral-7b-instruct-v0.1
+interpreter --model deepinfra/jondurbin/airoboros-l2-70b-gpt4-1.4.1
+
+```
+
+```python Python
+interpreter.llm.model = "deepinfra/meta-llama/Llama-2-70b-chat-hf"
+interpreter.llm.model = "deepinfra/meta-llama/Llama-2-7b-chat-hf"
+interpreter.llm.model = "deepinfra/meta-llama/Llama-2-13b-chat-hf"
+interpreter.llm.model = "deepinfra/codellama/CodeLlama-34b-Instruct-hf"
+interpreter.llm.model = "deepinfra/mistral-7b-instruct-v0.1"
+interpreter.llm.model = "deepinfra/jondurbin/airoboros-l2-70b-gpt4-1.4.1"
+
+```
+
+
+
+# Required Environment Variables
+
+Set the following environment variables [(click here to learn how)](https://chat.openai.com/share/1062cdd8-62a1-4aa8-8ec9-eca45645971a) to use these models.
+
+| Environment Variable | Description | Where to Find |
+| -------------------- | ----------------- | ---------------------------------------------------------------------- |
+| `DEEPINFRA_API_KEY`  | DeepInfra API key | [DeepInfra Dashboard -> API Keys](https://deepinfra.com/dash/api_keys) |
diff --git a/archive/classic_docs/language-models/hosted-models/gpt-4-setup.mdx b/archive/classic_docs/language-models/hosted-models/gpt-4-setup.mdx
new file mode 100644
index 0000000000..0bb1d7a33b
--- /dev/null
+++ b/archive/classic_docs/language-models/hosted-models/gpt-4-setup.mdx
@@ -0,0 +1,55 @@
+---
+title: GPT-4 Setup
+---
+
+# Setting Up GPT-4
+
+Step 1 - Install OpenAI packages
+
+```
+pip install openai
+```
+
+Step 2 - Create a new API key at [https://platform.openai.com/api-keys](https://platform.openai.com/api-keys)
+
+
+
+Step 3 - Run the `interpreter` command after installing `open-interpreter`, and enter your newly generated API key when prompted
+
+
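+If you are using the Python package rather than the terminal, you can also pass the key directly in code. A minimal sketch (the key string below is a placeholder for your real key):
+
+```python
+from interpreter import interpreter
+
+interpreter.llm.api_key = "your-api-key-here"  # placeholder, replace with your OpenAI API key
+interpreter.chat()
+```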
+
+or
+
+**FOR MACOS:**
+
+1. **Open Terminal**: You can find it in the Applications folder or search for it using Spotlight (Command + Space).
+2. **Edit Bash Profile**: Use the command `nano ~/.bash_profile` or `nano ~/.zshrc` (for newer MacOS versions) to open the profile file in a text editor.
+3. **Add Environment Variable**: In the editor, add the line below, replacing `your-api-key-here` with your actual API key:
+
+ ```
+   export OPENAI_API_KEY='your-api-key-here'
+ ```
+
+4. **Save and Exit**: Press Ctrl+O to write the changes, followed by Ctrl+X to close the editor.
+5. **Load Your Profile**: Use the command `source ~/.bash_profile` or `source ~/.zshrc` to load the updated profile.
+6. **Verification**: Verify the setup by typing `echo $OPENAI_API_KEY` in the terminal. It should display your API key.
+
+**FOR WINDOWS:**
+
+1. **Open Command Prompt**: You can find it by searching "cmd" in the start menu.
+2. **Set environment variable in the current session**: To set the environment variable in the current session, use the command below, replacing `your-api-key-here` with your actual API key:
+
+ ```
+   set OPENAI_API_KEY=your-api-key-here
+ ```
+
+ This command will set the OPENAI_API_KEY environment variable for the current session.
+
+3. **Permanent setup**: To make the setup permanent, add the variable through the system properties as follows:
+
+ - Right-click on 'This PC' or 'My Computer' and select 'Properties'.
+ - Click on 'Advanced system settings'.
+ - Click the 'Environment Variables' button.
+ - In the 'System variables' section, click 'New...' and enter OPENAI_API_KEY as the variable name and your API key as the variable value.
+
+4. **Verification**: To verify the setup, reopen the command prompt and type the command below. It should display your API key: `echo %OPENAI_API_KEY%`
diff --git a/archive/classic_docs/language-models/hosted-models/huggingface.mdx b/archive/classic_docs/language-models/hosted-models/huggingface.mdx
new file mode 100644
index 0000000000..a8b2d8f187
--- /dev/null
+++ b/archive/classic_docs/language-models/hosted-models/huggingface.mdx
@@ -0,0 +1,48 @@
+---
+title: Huggingface
+---
+
+To use Open Interpreter with Huggingface models, set the `model` flag:
+
+
+
+```bash Terminal
+interpreter --model huggingface/
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.model = "huggingface/"
+interpreter.chat()
+```
+
+
+
+You may also need to specify your Huggingface API base URL:
+
+
+```bash Terminal
+interpreter --api_base https://my-endpoint.huggingface.cloud
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.api_base = "https://my-endpoint.huggingface.cloud"
+interpreter.chat()
+```
+
+
+
+# Supported Models
+
+Open Interpreter should work with almost any text-based Hugging Face model.
+
+# Required Environment Variables
+
+Set the following environment variables [(click here to learn how)](https://chat.openai.com/share/1062cdd8-62a1-4aa8-8ec9-eca45645971a) to use these models.
+
+| Environment Variable | Description | Where to Find |
+| ---------------------- | --------------------------- | ---------------------------------------------------------------------------------- |
+| `HUGGINGFACE_API_KEY`  | Huggingface account API key | [Huggingface -> Settings -> Access Tokens](https://huggingface.co/settings/tokens) |
diff --git a/archive/classic_docs/language-models/hosted-models/mistral-api.mdx b/archive/classic_docs/language-models/hosted-models/mistral-api.mdx
new file mode 100644
index 0000000000..67b83f1874
--- /dev/null
+++ b/archive/classic_docs/language-models/hosted-models/mistral-api.mdx
@@ -0,0 +1,53 @@
+---
+title: Mistral AI API
+---
+
+To use Open Interpreter with the Mistral API, set the `model` flag:
+
+
+
+```bash Terminal
+interpreter --model mistral/
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.model = "mistral/"
+interpreter.chat()
+```
+
+
+
+# Supported Models
+
+We support the following completion models from the Mistral API:
+
+- mistral-tiny
+- mistral-small
+- mistral-medium
+
+
+
+```bash Terminal
+
+interpreter --model mistral/mistral-tiny
+interpreter --model mistral/mistral-small
+interpreter --model mistral/mistral-medium
+```
+
+```python Python
+interpreter.llm.model = "mistral/mistral-tiny"
+interpreter.llm.model = "mistral/mistral-small"
+interpreter.llm.model = "mistral/mistral-medium"
+```
+
+
+
+# Required Environment Variables
+
+Set the following environment variables [(click here to learn how)](https://chat.openai.com/share/1062cdd8-62a1-4aa8-8ec9-eca45645971a) to use these models.
+
+| Environment Variable | Description | Where to Find |
+| -------------------- | -------------------------------------------- | -------------------------------------------------- |
+| `MISTRAL_API_KEY` | The Mistral API key from Mistral API Console | [Mistral API Console](https://console.mistral.ai/user/api-keys/) |
diff --git a/archive/classic_docs/language-models/hosted-models/nlp-cloud.mdx b/archive/classic_docs/language-models/hosted-models/nlp-cloud.mdx
new file mode 100644
index 0000000000..de1adaee83
--- /dev/null
+++ b/archive/classic_docs/language-models/hosted-models/nlp-cloud.mdx
@@ -0,0 +1,28 @@
+---
+title: NLP Cloud
+---
+
+To use Open Interpreter with NLP Cloud, set the `model` flag:
+
+
+
+```bash Terminal
+interpreter --model dolphin
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.model = "dolphin"
+interpreter.chat()
+```
+
+
+
+# Required Environment Variables
+
+Set the following environment variables [(click here to learn how)](https://chat.openai.com/share/1062cdd8-62a1-4aa8-8ec9-eca45645971a) to use these models.
+
+| Environment Variable | Description | Where to Find |
+| -------------------- | ----------------- | ----------------------------------------------------------------- |
+| `NLP_CLOUD_API_KEY`  | NLP Cloud API key | [NLP Cloud Dashboard -> API KEY](https://nlpcloud.com/home/token) |
diff --git a/archive/classic_docs/language-models/hosted-models/openai.mdx b/archive/classic_docs/language-models/hosted-models/openai.mdx
new file mode 100644
index 0000000000..d75b862f8a
--- /dev/null
+++ b/archive/classic_docs/language-models/hosted-models/openai.mdx
@@ -0,0 +1,62 @@
+---
+title: OpenAI
+---
+
+To use Open Interpreter with a model from OpenAI, simply run:
+
+
+
+```bash Terminal
+interpreter
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.chat()
+```
+
+
+
+This will default to `gpt-4-turbo`, which is the most capable publicly available model for code interpretation (Open Interpreter was designed to be used with `gpt-4`).
+
+To run a specific model from OpenAI, set the `model` flag:
+
+
+
+```bash Terminal
+interpreter --model gpt-3.5-turbo
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.model = "gpt-3.5-turbo"
+interpreter.chat()
+```
+
+
+
+# Supported Models
+
+We support any model on [OpenAI's models page](https://platform.openai.com/docs/models/):
+
+
+
+```bash Terminal
+interpreter --model gpt-4o
+```
+
+```python Python
+interpreter.llm.model = "gpt-4o"
+```
+
+
+
+# Required Environment Variables
+
+Set the following environment variables [(click here to learn how)](https://chat.openai.com/share/1062cdd8-62a1-4aa8-8ec9-eca45645971a) to use these models.
+
+| Environment Variable | Description | Where to Find |
+| -------------------- | ---------------------------------------------------- | ------------------------------------------------------------------- |
+| `OPENAI_API_KEY` | The API key for authenticating to OpenAI's services. | [OpenAI Account Page](https://platform.openai.com/account/api-keys) |
diff --git a/archive/classic_docs/language-models/hosted-models/openrouter.mdx b/archive/classic_docs/language-models/hosted-models/openrouter.mdx
new file mode 100644
index 0000000000..914c08dcef
--- /dev/null
+++ b/archive/classic_docs/language-models/hosted-models/openrouter.mdx
@@ -0,0 +1,64 @@
+---
+title: OpenRouter
+---
+
+To use Open Interpreter with a model from OpenRouter, set the `model` flag to begin with `openrouter/`:
+
+
+
+```bash Terminal
+interpreter --model openrouter/openai/gpt-3.5-turbo
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.model = "openrouter/openai/gpt-3.5-turbo"
+interpreter.chat()
+```
+
+
+
+# Supported Models
+
+We support any model on [OpenRouter's models page](https://openrouter.ai/models):
+
+
+
+```bash Terminal
+interpreter --model openrouter/openai/gpt-3.5-turbo
+interpreter --model openrouter/openai/gpt-3.5-turbo-16k
+interpreter --model openrouter/openai/gpt-4
+interpreter --model openrouter/openai/gpt-4-32k
+interpreter --model openrouter/anthropic/claude-2
+interpreter --model openrouter/anthropic/claude-instant-v1
+interpreter --model openrouter/google/palm-2-chat-bison
+interpreter --model openrouter/google/palm-2-codechat-bison
+interpreter --model openrouter/meta-llama/llama-2-13b-chat
+interpreter --model openrouter/meta-llama/llama-2-70b-chat
+```
+
+```python Python
+interpreter.llm.model = "openrouter/openai/gpt-3.5-turbo"
+interpreter.llm.model = "openrouter/openai/gpt-3.5-turbo-16k"
+interpreter.llm.model = "openrouter/openai/gpt-4"
+interpreter.llm.model = "openrouter/openai/gpt-4-32k"
+interpreter.llm.model = "openrouter/anthropic/claude-2"
+interpreter.llm.model = "openrouter/anthropic/claude-instant-v1"
+interpreter.llm.model = "openrouter/google/palm-2-chat-bison"
+interpreter.llm.model = "openrouter/google/palm-2-codechat-bison"
+interpreter.llm.model = "openrouter/meta-llama/llama-2-13b-chat"
+interpreter.llm.model = "openrouter/meta-llama/llama-2-70b-chat"
+```
+
+
+
+# Required Environment Variables
+
+Set the following environment variables [(click here to learn how)](https://chat.openai.com/share/1062cdd8-62a1-4aa8-8ec9-eca45645971a) to use these models.
+
+| Environment Variable | Description | Where to Find |
+| --------------------- | ------------ | -------------- |
+| `OPENROUTER_API_KEY` | The API key for authenticating to OpenRouter's services. | [OpenRouter Account Page](https://openrouter.ai/keys) |
+| `OR_SITE_URL` | The site URL for OpenRouter's services. | [OpenRouter Account Page](https://openrouter.ai/keys) |
+| `OR_APP_NAME` | The app name for OpenRouter's services. | [OpenRouter Account Page](https://openrouter.ai/keys) |
diff --git a/archive/classic_docs/language-models/hosted-models/palm.mdx b/archive/classic_docs/language-models/hosted-models/palm.mdx
new file mode 100644
index 0000000000..dc6078e085
--- /dev/null
+++ b/archive/classic_docs/language-models/hosted-models/palm.mdx
@@ -0,0 +1,28 @@
+---
+title: PaLM API - Google
+---
+
+To use Open Interpreter with PaLM, you must `pip install -q google-generativeai`, then set the `model` flag in Open Interpreter:
+
+
+
+```bash Terminal
+interpreter --model palm/chat-bison
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.model = "palm/chat-bison"
+interpreter.chat()
+```
+
+
+
+# Required Environment Variables
+
+Set the following environment variables [(click here to learn how)](https://chat.openai.com/share/1062cdd8-62a1-4aa8-8ec9-eca45645971a) to use these models.
+
+| Environment Variable | Description | Where to Find |
+| -------------------- | ---------------------------------------------------------------- | ------------------------------------------------------------------------------------ |
+| `PALM_API_KEY` | The PaLM API key from Google Generative AI Developers dashboard. | [Google Generative AI Developers Dashboard](https://developers.generativeai.google/) |
diff --git a/archive/classic_docs/language-models/hosted-models/perplexity.mdx b/archive/classic_docs/language-models/hosted-models/perplexity.mdx
new file mode 100644
index 0000000000..6af649d5c7
--- /dev/null
+++ b/archive/classic_docs/language-models/hosted-models/perplexity.mdx
@@ -0,0 +1,80 @@
+---
+title: Perplexity
+---
+
+To use Open Interpreter with the Perplexity API, set the `model` flag:
+
+
+
+```bash Terminal
+interpreter --model perplexity/
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.model = "perplexity/"
+interpreter.chat()
+```
+
+
+
+# Supported Models
+
+We support the following completion models from the Perplexity API:
+
+- pplx-7b-chat
+- pplx-70b-chat
+- pplx-7b-online
+- pplx-70b-online
+- codellama-34b-instruct
+- llama-2-13b-chat
+- llama-2-70b-chat
+- mistral-7b-instruct
+- openhermes-2-mistral-7b
+- openhermes-2.5-mistral-7b
+- pplx-7b-chat-alpha
+- pplx-70b-chat-alpha
+
+
+
+```bash Terminal
+
+interpreter --model perplexity/pplx-7b-chat
+interpreter --model perplexity/pplx-70b-chat
+interpreter --model perplexity/pplx-7b-online
+interpreter --model perplexity/pplx-70b-online
+interpreter --model perplexity/codellama-34b-instruct
+interpreter --model perplexity/llama-2-13b-chat
+interpreter --model perplexity/llama-2-70b-chat
+interpreter --model perplexity/mistral-7b-instruct
+interpreter --model perplexity/openhermes-2-mistral-7b
+interpreter --model perplexity/openhermes-2.5-mistral-7b
+interpreter --model perplexity/pplx-7b-chat-alpha
+interpreter --model perplexity/pplx-70b-chat-alpha
+```
+
+```python Python
+interpreter.llm.model = "perplexity/pplx-7b-chat"
+interpreter.llm.model = "perplexity/pplx-70b-chat"
+interpreter.llm.model = "perplexity/pplx-7b-online"
+interpreter.llm.model = "perplexity/pplx-70b-online"
+interpreter.llm.model = "perplexity/codellama-34b-instruct"
+interpreter.llm.model = "perplexity/llama-2-13b-chat"
+interpreter.llm.model = "perplexity/llama-2-70b-chat"
+interpreter.llm.model = "perplexity/mistral-7b-instruct"
+interpreter.llm.model = "perplexity/openhermes-2-mistral-7b"
+interpreter.llm.model = "perplexity/openhermes-2.5-mistral-7b"
+interpreter.llm.model = "perplexity/pplx-7b-chat-alpha"
+interpreter.llm.model = "perplexity/pplx-70b-chat-alpha"
+```
+
+
+
+# Required Environment Variables
+
+Set the following environment variables [(click here to learn how)](https://chat.openai.com/share/1062cdd8-62a1-4aa8-8ec9-eca45645971a) to use these models.
+
+| Environment Variable | Description | Where to Find |
+| ----------------------- | ------------------------------------ | ----------------------------------------------------------------- |
+| `PERPLEXITYAI_API_KEY`  | The Perplexity API key from pplx-api | [Perplexity API Settings](https://www.perplexity.ai/settings/api) |
diff --git a/archive/classic_docs/language-models/hosted-models/petals.mdx b/archive/classic_docs/language-models/hosted-models/petals.mdx
new file mode 100644
index 0000000000..bad434cc10
--- /dev/null
+++ b/archive/classic_docs/language-models/hosted-models/petals.mdx
@@ -0,0 +1,50 @@
+---
+title: Petals
+---
+
+To use Open Interpreter with a model from Petals, set the `model` flag to begin with `petals/`:
+
+
+
+```bash Terminal
+interpreter --model petals/petals-team/StableBeluga2
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.model = "petals/petals-team/StableBeluga2"
+interpreter.chat()
+```
+
+
+
+# Pre-Requisites
+
+Ensure you have petals installed:
+
+```bash Terminal
+pip install git+https://github.com/bigscience-workshop/petals
+```
+
+# Supported Models
+
+We support any model on [Petals](https://github.com/bigscience-workshop/petals):
+
+
+
+```bash Terminal
+interpreter --model petals/petals-team/StableBeluga2
+interpreter --model petals/huggyllama/llama-65b
+```
+
+```python Python
+interpreter.llm.model = "petals/petals-team/StableBeluga2"
+interpreter.llm.model = "petals/huggyllama/llama-65b"
+```
+
+
+
+# Required Environment Variables
+
+No environment variables are required to use these models.
\ No newline at end of file
diff --git a/archive/classic_docs/language-models/hosted-models/replicate.mdx b/archive/classic_docs/language-models/hosted-models/replicate.mdx
new file mode 100644
index 0000000000..f1bb0ccfab
--- /dev/null
+++ b/archive/classic_docs/language-models/hosted-models/replicate.mdx
@@ -0,0 +1,50 @@
+---
+title: Replicate
+---
+
+To use Open Interpreter with a model from Replicate, set the `model` flag to begin with `replicate/`:
+
+
+
+```bash Terminal
+interpreter --model replicate/llama-2-70b-chat:2796ee9483c3fd7aa2e171d38f4ca12251a30609463dcfd4cd76703f22e96cdf
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.model = "replicate/llama-2-70b-chat:2796ee9483c3fd7aa2e171d38f4ca12251a30609463dcfd4cd76703f22e96cdf"
+interpreter.chat()
+```
+
+
+
+# Supported Models
+
+We support any model on [Replicate's models page](https://replicate.ai/explore):
+
+
+
+```bash Terminal
+interpreter --model replicate/llama-2-70b-chat:2796ee9483c3fd7aa2e171d38f4ca12251a30609463dcfd4cd76703f22e96cdf
+interpreter --model replicate/a16z-infra/llama-2-13b-chat:2a7f981751ec7fdf87b5b91ad4db53683a98082e9ff7bfd12c8cd5ea85980a52
+interpreter --model replicate/vicuna-13b:6282abe6a492de4145d7bb601023762212f9ddbbe78278bd6771c8b3b2f2a13b
+interpreter --model replicate/daanelson/flan-t5-large:ce962b3f6792a57074a601d3979db5839697add2e4e02696b3ced4c022d4767f
+```
+
+```python Python
+interpreter.llm.model = "replicate/llama-2-70b-chat:2796ee9483c3fd7aa2e171d38f4ca12251a30609463dcfd4cd76703f22e96cdf"
+interpreter.llm.model = "replicate/a16z-infra/llama-2-13b-chat:2a7f981751ec7fdf87b5b91ad4db53683a98082e9ff7bfd12c8cd5ea85980a52"
+interpreter.llm.model = "replicate/vicuna-13b:6282abe6a492de4145d7bb601023762212f9ddbbe78278bd6771c8b3b2f2a13b"
+interpreter.llm.model = "replicate/daanelson/flan-t5-large:ce962b3f6792a57074a601d3979db5839697add2e4e02696b3ced4c022d4767f"
+```
+
+
+
+# Required Environment Variables
+
+Set the following environment variables [(click here to learn how)](https://chat.openai.com/share/1062cdd8-62a1-4aa8-8ec9-eca45645971a) to use these models.
+
+| Environment Variable | Description | Where to Find |
+| --------------------- | ------------ | -------------- |
+| `REPLICATE_API_KEY` | The API key for authenticating to Replicate's services. | [Replicate Account Page](https://replicate.ai/login) |
\ No newline at end of file
diff --git a/archive/classic_docs/language-models/hosted-models/togetherai.mdx b/archive/classic_docs/language-models/hosted-models/togetherai.mdx
new file mode 100644
index 0000000000..68b4d66065
--- /dev/null
+++ b/archive/classic_docs/language-models/hosted-models/togetherai.mdx
@@ -0,0 +1,32 @@
+---
+title: Together AI
+---
+
+To use Open Interpreter with Together AI, set the `model` flag:
+
+
+
+```bash Terminal
+interpreter --model together_ai/
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.model = "together_ai/"
+interpreter.chat()
+```
+
+
+
+# Supported Models
+
+All models on Together AI are supported.
+
+# Required Environment Variables
+
+Set the following environment variables [(click here to learn how)](https://chat.openai.com/share/1062cdd8-62a1-4aa8-8ec9-eca45645971a) to use these models.
+
+| Environment Variable | Description | Where to Find |
+| --------------------- | --------------------------------------------- | ------------------------------------------------------------------------------------------- |
+| `TOGETHERAI_API_KEY`  | The TogetherAI API key from the Settings page | [TogetherAI -> Profile -> Settings -> API Keys](https://api.together.xyz/settings/api-keys) |
diff --git a/archive/classic_docs/language-models/hosted-models/vertex-ai.mdx b/archive/classic_docs/language-models/hosted-models/vertex-ai.mdx
new file mode 100644
index 0000000000..1275aaa3cf
--- /dev/null
+++ b/archive/classic_docs/language-models/hosted-models/vertex-ai.mdx
@@ -0,0 +1,48 @@
+---
+title: Google (Vertex AI)
+---
+
+## Pre-requisites
+* `pip install google-cloud-aiplatform`
+* Authentication:
+  * Run `gcloud auth application-default login` (see the [Google Cloud Docs](https://cloud.google.com/docs/authentication/external/set-up-adc))
+  * Alternatively, you can provide an `application_default_credentials.json` file
+
+To use Open Interpreter with Google's Vertex AI API, set the `model` flag:
+
+
+
+```bash Terminal
+interpreter --model gemini-pro
+interpreter --model gemini-pro-vision
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.model = "gemini-pro"
+interpreter.llm.model = "gemini-pro-vision"
+interpreter.chat()
+```
+
+
+
+# Required Environment Variables
+
+Set the following environment variables [(click here to learn how)](https://chat.openai.com/share/1062cdd8-62a1-4aa8-8ec9-eca45645971a) to use these models.
+
+| Environment Variable | Description | Where to Find |
+| -------------------- | ----------------------------------------- | ------------------------------------------------------------------ |
+| `VERTEXAI_PROJECT`   | The Google Cloud project ID. | [Google Cloud Console](https://console.cloud.google.com/vertex-ai) |
+| `VERTEXAI_LOCATION`  | The location of your Vertex AI resources. | [Google Cloud Console](https://console.cloud.google.com/vertex-ai) |
+
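+For example, you can set these from Python before starting a chat. A minimal sketch (the project ID and region below are placeholder values):
+
+```python
+import os
+
+from interpreter import interpreter
+
+# Placeholder values: use your own Google Cloud project ID and region
+os.environ["VERTEXAI_PROJECT"] = "my-project-id"
+os.environ["VERTEXAI_LOCATION"] = "us-central1"
+
+interpreter.llm.model = "gemini-pro"
+interpreter.chat()
+```
+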
+## Supported Models
+
+- gemini-pro
+- gemini-pro-vision
+- chat-bison-32k
+- chat-bison
+- chat-bison@001
+- codechat-bison
+- codechat-bison-32k
+- codechat-bison@001
\ No newline at end of file
diff --git a/archive/classic_docs/language-models/hosted-models/vllm.mdx b/archive/classic_docs/language-models/hosted-models/vllm.mdx
new file mode 100644
index 0000000000..e2dc2e311b
--- /dev/null
+++ b/archive/classic_docs/language-models/hosted-models/vllm.mdx
@@ -0,0 +1,44 @@
+---
+title: vLLM
+---
+
+To use Open Interpreter with vLLM, you will need to:
+
+1. `pip install vllm`
+2. Set the `api_base` flag:
+
+
+
+```bash Terminal
+interpreter --api_base <your_api_base>
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.api_base = ""
+interpreter.chat()
+```
+
+
+
+3. Set the `model` flag:
+
+
+
+```bash Terminal
+interpreter --model vllm/
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.model = "vllm/"
+interpreter.chat()
+```
+
+
+
+# Supported Models
+
+All models from vLLM should be supported.
diff --git a/archive/classic_docs/language-models/introduction.mdx b/archive/classic_docs/language-models/introduction.mdx
new file mode 100644
index 0000000000..fd0d364af8
--- /dev/null
+++ b/archive/classic_docs/language-models/introduction.mdx
@@ -0,0 +1,29 @@
+---
+title: Introduction
+---
+
+**Open Interpreter** works with both hosted and local language models.
+
+Hosted models are faster and more capable, but require payment. Local models are private and free, but are often less capable.
+
+For this reason, we recommend starting with a **hosted** model, then switching to a local model once you've explored Open Interpreter's capabilities.
+
+
+
+
+ Connect to a hosted language model like GPT-4 **(recommended)**
+
+
+
+ Set up a local language model like Mistral
+
+
+
+
+
+
+
+
+ Thank you to the incredible [LiteLLM](https://litellm.ai/) team for their
+ efforts in connecting Open Interpreter to hosted providers.
+
diff --git a/archive/classic_docs/language-models/local-models/best-practices.mdx b/archive/classic_docs/language-models/local-models/best-practices.mdx
new file mode 100644
index 0000000000..fb420bec44
--- /dev/null
+++ b/archive/classic_docs/language-models/local-models/best-practices.mdx
@@ -0,0 +1,35 @@
+---
+title: "Best Practices"
+---
+
+Most settings — like model architecture and GPU offloading — can be adjusted via your LLM provider, like [LM Studio](https://lmstudio.ai/).
+
+**However, `max_tokens` and `context_window` should be set via Open Interpreter.**
+
+For local mode, smaller context windows will use less RAM, so we recommend trying a much shorter window (~1000) if it's failing or slow.
+
+
+
+```bash Terminal
+interpreter --local --max_tokens 1000 --context_window 3000
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.offline = True # Disables online features like Open Procedures
+interpreter.llm.model = "openai/x" # Tells OI to send messages in OpenAI's format
+interpreter.llm.api_key = "fake_key" # LiteLLM, which we use to talk to LM Studio, requires this
+interpreter.llm.api_base = "http://localhost:1234/v1" # Point this at any OpenAI compatible server
+
+interpreter.llm.max_tokens = 1000
+interpreter.llm.context_window = 3000
+
+interpreter.chat()
+```
+
+
+
+
+
+Make sure `max_tokens` is less than `context_window`.
diff --git a/archive/classic_docs/language-models/local-models/custom-endpoint.mdx b/archive/classic_docs/language-models/local-models/custom-endpoint.mdx
new file mode 100644
index 0000000000..c70d37058e
--- /dev/null
+++ b/archive/classic_docs/language-models/local-models/custom-endpoint.mdx
@@ -0,0 +1,19 @@
+---
+title: Custom Endpoint
+---
+
+Simply set `api_base` to any OpenAI compatible server:
+
+
+```bash Terminal
+interpreter --api_base <your_api_base>
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.api_base = ""
+interpreter.chat()
+```
+
+
diff --git a/archive/classic_docs/language-models/local-models/janai.mdx b/archive/classic_docs/language-models/local-models/janai.mdx
new file mode 100644
index 0000000000..215ed9c2f4
--- /dev/null
+++ b/archive/classic_docs/language-models/local-models/janai.mdx
@@ -0,0 +1,56 @@
+---
+title: Jan.ai
+---
+
+Jan.ai is an open-source platform for running local language models on your computer, and it comes equipped with a built-in server.
+
+To run Open Interpreter with Jan.ai, follow these steps:
+
+1. [Install](https://jan.ai/) the Jan.ai Desktop Application on your computer.
+
+2. Once installed, you will need to install a language model. Click the 'Hub' icon on the left sidebar (the four squares icon). Click the 'Download' button next to the model you would like to install, and wait for it to finish installing before continuing.
+
+3. To start your model, click the 'Settings' icon at the bottom of the left sidebar. Then click 'Models' under the CORE EXTENSIONS section. This page displays all of your installed models. Click the options icon next to the model you would like to start (vertical ellipsis icon). Then click 'Start Model', which will take a few seconds to fire up.
+
+4. Click the 'Advanced' button under the GENERAL section, and toggle on the "Enable API Server" option. This will start a local server that you can use to interact with your model.
+
+5. Now we fire up Open Interpreter with this custom model. Either run `interpreter --local` in the terminal to set it up interactively, or run the command below, replacing `<model-id>` with the ID of the model you downloaded:
+
+
+
+```bash Terminal
+interpreter --api_base http://localhost:1337/v1 --model <model-id>
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.offline = True # Disables online features like Open Procedures
+interpreter.llm.model = ""
+interpreter.llm.api_base = "http://localhost:1337/v1 "
+
+interpreter.chat()
+```
+
+
+
+If your model can handle a longer context window than the default 3000, you can set the context window manually by running:
+
+
+
+```bash Terminal
+interpreter --api_base http://localhost:1337/v1 --model <model-id> --context_window 5000
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.context_window = 5000
+```
+
+
+
+
+ If Jan is producing strange output, or no output at all, make sure to update
+ to the latest version and clean your cache.
+
diff --git a/archive/classic_docs/language-models/local-models/llamafile.mdx b/archive/classic_docs/language-models/local-models/llamafile.mdx
new file mode 100644
index 0000000000..372283e765
--- /dev/null
+++ b/archive/classic_docs/language-models/local-models/llamafile.mdx
@@ -0,0 +1,27 @@
+---
+title: LlamaFile
+---
+
+The easiest way to get started with local models in Open Interpreter is to run `interpreter --local` in the terminal, select LlamaFile, then go through the interactive setup process. This will download the model and start the server for you. If you choose to do it manually, you can follow the instructions below.
+
+To use LlamaFile manually with Open Interpreter, you'll need to download the model and start the server by running the file in the terminal. You can do this with the following commands:
+
+```bash
+# Download Mixtral
+
+wget https://huggingface.co/jartine/Mixtral-8x7B-v0.1.llamafile/resolve/main/mixtral-8x7b-instruct-v0.1.Q5_K_M-server.llamafile
+
+# Make it an executable
+
+chmod +x mixtral-8x7b-instruct-v0.1.Q5_K_M-server.llamafile
+
+# Start the server
+
+./mixtral-8x7b-instruct-v0.1.Q5_K_M-server.llamafile
+
+# In a separate terminal window, run OI and point it at the llamafile server
+
+interpreter --api_base http://localhost:8080/v1
+```
+
+Please note that if you are using a Mac with Apple Silicon, you'll need to have Xcode installed.
diff --git a/archive/classic_docs/language-models/local-models/lm-studio.mdx b/archive/classic_docs/language-models/local-models/lm-studio.mdx
new file mode 100644
index 0000000000..384f7e37e9
--- /dev/null
+++ b/archive/classic_docs/language-models/local-models/lm-studio.mdx
@@ -0,0 +1,57 @@
+---
+title: LM Studio
+---
+
+Open Interpreter can use an OpenAI-compatible server to run models locally (LM Studio, Jan.ai, Ollama, etc.).
+
+Simply run `interpreter` with the `api_base` URL of your inference server (for LM Studio it is `http://localhost:1234/v1` by default):
+
+```shell
+interpreter --api_base "http://localhost:1234/v1" --api_key "fake_key"
+```
+
+Alternatively, you can use Llamafile without installing any third-party software, just by running
+
+```shell
+interpreter --local
+```
+
+For a more detailed guide, check out [this video by Mike Bird](https://www.youtube.com/watch?v=CEs51hGWuGU?si=cN7f6QhfT4edfG5H).
+
+**How to run LM Studio in the background.**
+
+1. Download [https://lmstudio.ai/](https://lmstudio.ai/) then start it.
+2. Select a model then click **↓ Download**.
+3. Click the **↔️** button on the left (below 💬).
+4. Select your model at the top, then click **Start Server**.
+
+Once the server is running, you can begin your conversation with Open Interpreter.
+
+(When you run the command `interpreter --local` and select LM Studio, these steps will be displayed.)
+
+
+ Local mode sets your `context_window` to 3000, and your `max_tokens` to 1000.
+ If your model has different requirements, [set these parameters
+ manually.](/settings#language-model)
+
+
+# Python
+
+Compared to the terminal interface, our Python package gives you more granular control over each setting.
+
+You can point `interpreter.llm.api_base` at any OpenAI compatible server (including one running locally).
+
+For example, to connect to [LM Studio](https://lmstudio.ai/), use these settings:
+
+```python
+from interpreter import interpreter
+
+interpreter.offline = True # Disables online features like Open Procedures
+interpreter.llm.model = "openai/x" # Tells OI to send messages in OpenAI's format
+interpreter.llm.api_key = "fake_key" # LiteLLM, which we use to talk to LM Studio, requires this
+interpreter.llm.api_base = "http://localhost:1234/v1" # Point this at any OpenAI compatible server
+
+interpreter.chat()
+```
+
+Simply ensure that **LM Studio**, or any other OpenAI compatible server, is running at `api_base`.
diff --git a/archive/classic_docs/language-models/local-models/ollama.mdx b/archive/classic_docs/language-models/local-models/ollama.mdx
new file mode 100644
index 0000000000..4649c01963
--- /dev/null
+++ b/archive/classic_docs/language-models/local-models/ollama.mdx
@@ -0,0 +1,44 @@
+---
+title: Ollama
+---
+
+Ollama is an easy way to get local language models running on your computer through a command-line interface.
+
+To run Ollama with Open Interpreter:
+
+1. Download Ollama for your platform from [here](https://ollama.ai/download).
+
+2. Open the installed Ollama application, and go through the setup, which will require your password.
+
+3. Now you are ready to download a model. You can view all available models [here](https://ollama.ai/library). To download a model, run:
+
+```bash
+ollama run <model-name>
+```
+
+4. It will likely take a while to download, but once it does, we are ready to use it with Open Interpreter. You can either run `interpreter --local` to set it up interactively in the terminal, or do it manually:
+
+
+
+```bash Terminal
+interpreter --model ollama/<model-name>
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.offline = True # Disables online features like Open Procedures
+interpreter.llm.model = "ollama_chat/"
+interpreter.llm.api_base = "http://localhost:11434"
+
+interpreter.chat()
+```
+
+
+
+For any future runs with Ollama, ensure that the Ollama server is running. If using the desktop application, you can check to see if the Ollama menu bar item is active.
+
+
+ If Ollama is producing strange output, make sure to update to the latest
+ version
+
diff --git a/archive/classic_docs/language-models/settings.mdx b/archive/classic_docs/language-models/settings.mdx
new file mode 100644
index 0000000000..02968b6643
--- /dev/null
+++ b/archive/classic_docs/language-models/settings.mdx
@@ -0,0 +1,7 @@
+---
+title: Settings
+---
+
+The `interpreter.llm` is responsible for running the language model.
+
+[Click here](/settings/all-settings#language-model) to view `interpreter.llm` settings.
diff --git a/archive/classic_docs/legal/license.mdx b/archive/classic_docs/legal/license.mdx
new file mode 100644
index 0000000000..18176e0984
--- /dev/null
+++ b/archive/classic_docs/legal/license.mdx
@@ -0,0 +1,117 @@
+---
+title: Licenses
+description: By using Interpreter, you agree to our Privacy Policy and Terms of Service
+---
+
+
+
+# Interpreter Privacy Policy
+
+Last updated: August 13, 2024
+
+Open Interpreter, Inc. ("we," "our," or "us") is committed to protecting your privacy. This Privacy Policy explains how we collect, use, and safeguard your information when you use our AI desktop application, Interpreter ("the Application").
+
+## 1. Information We Collect
+
+We collect the following information:
+
+a) Personal Information:
+ - Name
+ - Email address
+
+b) Usage Information:
+ - Conversations with the AI chatbot
+ - Code generated during use of the Application
+
+## 2. How We Use Your Information
+
+We use the collected information to:
+
+a) Provide and improve our services
+b) Communicate with you about your account or the Application
+c) Improve our underlying AI model
+
+## 3. Data Anonymization
+
+All conversations and generated code are anonymized before being used to improve our AI model. However, please be aware that if you explicitly instruct the AI to include personal identifiable information (PII) in the generated code, such information may be captured.
+
+## 4. Data Security
+
+We implement appropriate technical and organizational measures to protect your personal information. However, no method of transmission over the Internet or electronic storage is 100% secure.
+
+## 5. Your Rights
+
+You have the right to access, correct, or delete your personal information. Please contact us at help@openinterpreter.com for any data-related requests.
+
+## 6. Changes to This Privacy Policy
+
+We may update our Privacy Policy from time to time. We will notify you of any changes by posting the new Privacy Policy on this page and updating the "Last updated" date.
+
+## 7. Contact Us
+
+If you have any questions about this Privacy Policy, please contact us at help@openinterpreter.com.
+
+By using Interpreter, you agree to the collection and use of information in accordance with this Privacy Policy.
+
+---
+
+# Interpreter Terms of Service
+
+Last updated: August 13, 2024
+
+Please read these Terms of Service ("Terms", "Terms of Service") carefully before using the Interpreter desktop application (the "Service") operated by Open Interpreter, Inc. ("us", "we", or "our").
+
+## 1. Acceptance of Terms
+
+By accessing or using the Service, you agree to be bound by these Terms. If you disagree with any part of the terms, then you may not access the Service.
+
+## 2. Description of Service
+
+Interpreter is an AI-powered desktop application that allows users to interact with an AI chatbot to write and execute code.
+
+## 3. User Responsibilities
+
+By using our Service, you agree to:
+
+a) Review ALL code generated by Interpreter before execution.
+b) Grant explicit permission before any code is executed on your system.
+c) Understand the implications of the code you choose to execute.
+d) Use the Service in compliance with all applicable laws and regulations.
+
+## 4. Safety Measures
+
+We have implemented the following safety measures:
+
+a) We employ LakeraGuard, an industry-leading solution, to assess potential harm in generated code.
+b) Our custom judge layer provides explanations of what the code is intended to do.
+c) You will always be asked for permission before any code is executed.
+
+## 5. Assumption of Risk
+
+By using Interpreter, you acknowledge and accept the following risks:
+
+a) The application may generate code that, if executed, could alter or delete files on your system.
+b) While we have implemented safety measures, the AI may occasionally generate code with unintended consequences.
+c) In rare cases, the application might generate code that, if executed, could potentially expose sensitive information.
+
+## 6. Limitation of Liability
+
+To the fullest extent permitted by law, Open Interpreter, Inc. shall not be liable for any direct, indirect, incidental, special, consequential, or exemplary damages resulting from your use of the Service or any code generated or executed through the Service.
+
+## 7. Indemnification
+
+You agree to indemnify and hold harmless Open Interpreter, Inc., its officers, directors, employees, and agents from any claims, damages, losses, liabilities, and expenses (including legal fees) arising out of or related to your use of the Service or any code generated or executed through the Service.
+
+## 8. Modifications to Terms
+
+We reserve the right to modify these Terms at any time. Continued use of the Service after changes constitutes acceptance of the modified Terms.
+
+## 9. Governing Law
+
+These Terms shall be governed by and construed in accordance with the laws of [Your Jurisdiction], without regard to its conflict of law provisions.
+
+## 10. Contact Us
+
+If you have any questions about these Terms, please contact us at help@openinterpreter.com.
+
+By using Interpreter, you acknowledge that you have read, understood, and agree to be bound by these Terms of Service.
\ No newline at end of file
diff --git a/archive/classic_docs/mint.json b/archive/classic_docs/mint.json
new file mode 100644
index 0000000000..f2a4cf7204
--- /dev/null
+++ b/archive/classic_docs/mint.json
@@ -0,0 +1,160 @@
+{
+ "name": "Open Interpreter",
+ "logo": {
+ "dark": "/assets/logo/circle-inverted.png",
+ "light": "/assets/logo/circle.png"
+ },
+ "favicon": "/assets/favicon.png",
+ "colors": {
+ "primary": "#000000",
+ "light": "#FFFFFF",
+ "dark": "#000000",
+ "background": {
+ "light": "#FFFFFF",
+ "dark": "#000000"
+ },
+ "anchors": {
+ "from": "#000000",
+ "to": "#000000"
+ }
+ },
+ "topbarLinks": [
+ {
+ "name": "50K ★ GitHub",
+ "url": "https://github.com/OpenInterpreter/open-interpreter"
+ }
+ ],
+ "topbarCtaButton": {
+ "name": "Join Discord",
+ "url": "https://discord.gg/Hvz9Axh84z"
+ },
+ "navigation": [
+ {
+ "group": "Getting Started",
+ "pages": [
+ "getting-started/introduction",
+ "getting-started/setup"
+ ]
+ },
+ {
+ "group": "Guides",
+ "pages": [
+ "guides/basic-usage",
+ "guides/running-locally",
+ "guides/profiles",
+ "guides/streaming-response",
+ "guides/advanced-terminal-usage",
+ "guides/multiple-instances",
+ "guides/os-mode"
+ ]
+ },
+ {
+ "group": "Settings",
+ "pages": [
+ "settings/all-settings"
+ ]
+ },
+ {
+ "group": "Language Models",
+ "pages": [
+ "language-models/introduction",
+ {
+ "group": "Hosted Providers",
+ "pages": [
+ "language-models/hosted-models/openai",
+ "language-models/hosted-models/azure",
+ "language-models/hosted-models/vertex-ai",
+ "language-models/hosted-models/replicate",
+ "language-models/hosted-models/togetherai",
+ "language-models/hosted-models/mistral-api",
+ "language-models/hosted-models/anthropic",
+ "language-models/hosted-models/anyscale",
+ "language-models/hosted-models/aws-sagemaker",
+ "language-models/hosted-models/baseten",
+ "language-models/hosted-models/cloudflare",
+ "language-models/hosted-models/cohere",
+ "language-models/hosted-models/ai21",
+ "language-models/hosted-models/deepinfra",
+ "language-models/hosted-models/huggingface",
+ "language-models/hosted-models/nlp-cloud",
+ "language-models/hosted-models/openrouter",
+ "language-models/hosted-models/palm",
+ "language-models/hosted-models/perplexity",
+ "language-models/hosted-models/petals",
+ "language-models/hosted-models/vllm"
+ ]
+ },
+ {
+ "group": "Local Providers",
+ "pages": [
+ "language-models/local-models/ollama",
+ "language-models/local-models/llamafile",
+ "language-models/local-models/janai",
+ "language-models/local-models/lm-studio",
+ "language-models/local-models/custom-endpoint",
+ "language-models/local-models/best-practices"
+ ]
+ },
+ "language-models/custom-models",
+ "language-models/settings"
+ ]
+ },
+ {
+ "group": "Code Execution",
+ "pages": [
+ "code-execution/usage",
+ "code-execution/computer-api",
+ "code-execution/custom-languages",
+ "code-execution/settings"
+ ]
+ },
+ {
+ "group": "Protocols",
+ "pages": [
+ "protocols/lmc-messages"
+ ]
+ },
+ {
+ "group": "Integrations",
+ "pages": [
+ "integrations/e2b",
+ "integrations/docker"
+ ]
+ },
+ {
+ "group": "Safety",
+ "pages": [
+ "safety/introduction",
+ "safety/isolation",
+ "safety/safe-mode",
+ "safety/best-practices"
+ ]
+ },
+ {
+ "group": "Troubleshooting",
+ "pages": [
+ "troubleshooting/faq"
+ ]
+ },
+ {
+ "group": "Telemetry",
+ "pages": [
+ "telemetry/telemetry"
+ ]
+ },
+ {
+ "group": "Policies",
+ "pages": [
+ "legal/license"
+ ]
+ }
+ ],
+ "feedback": {
+ "suggestEdit": true
+ },
+ "footerSocials": {
+ "twitter": "https://x.com/OpenInterpreter",
+ "youtube": "https://www.youtube.com/@OpenInterpreter",
+ "linkedin": "https://www.linkedin.com/company/openinterpreter"
+ }
+}
\ No newline at end of file
diff --git a/interpreter/core/computer/__init__.py b/archive/classic_docs/protocols/i-protocol.mdx
similarity index 100%
rename from interpreter/core/computer/__init__.py
rename to archive/classic_docs/protocols/i-protocol.mdx
diff --git a/archive/classic_docs/protocols/lmc-messages.mdx b/archive/classic_docs/protocols/lmc-messages.mdx
new file mode 100644
index 0000000000..e0731f42c8
--- /dev/null
+++ b/archive/classic_docs/protocols/lmc-messages.mdx
@@ -0,0 +1,68 @@
+---
+title: LMC Messages
+---
+
+To support the incoming `L`anguage `M`odel `C`omputer architecture, we extend OpenAI's messages format to include additional information, and a new role called `computer`:
+
+```python
+# The user sends a message.
+{"role": "user", "type": "message", "content": "What's 2380*3875?"}
+
+# The assistant runs some code.
+{"role": "assistant", "type": "code", "format": "python", "content": "2380*3875"}
+
+# The computer responds with the result of the code.
+{"role": "computer", "type": "console", "format": "output", "content": "9222500"}
+
+# The assistant sends a message.
+{"role": "assistant", "type": "message", "content": "The result of multiplying 2380 by 3875 is 9222500."}
+```
+
+## Anatomy
+
+Each message in the LMC architecture has the following parameters (`format` is only present for some types):
+
+```
+{
+ "role": "", # Who is sending the message.
+ "type": "", # What kind of message is being sent.
+  "format": "", # Some types need to be further specified, so they optionally use this parameter.
+ "content": "", # What the message says.
+}
+```
+
+Parameter|Description|
+---|---|
+`role`|The sender of the message.|
+`type`|The kind of message being sent.|
+`content`|The actual content of the message.|
+`format`|The format of the content (optional).|
+
+## Roles
+
+Role|Description|
+---|---|
+`user`|The individual interacting with the system.|
+`assistant`|The language model.|
+`computer`|The system that executes the language model's commands.|
+
+## Possible Message Types / Formats
+
+Any role can produce any of the following formats, but we've included a `Common Roles` column to give you a sense of the message type's usage.
+
+Type|Format|Content Description|Common Roles
+---|---|---|---|
+message|None|A text-only message.|`user`, `assistant`|
+console|active_line|The active line of code (from the most recent code block) that's executing.|`computer`|
+console|output|Text output resulting from `print()` statements in Python, `console.log()` statements in Javascript, etc. **This includes errors.**|`computer`|
+image|base64|A `base64` image in PNG format (default)|`user`, `computer`|
+image|base64.png|A `base64` image in PNG format|`user`, `computer`|
+image|base64.jpeg|A `base64` image in JPEG format|`user`, `computer`|
+image|path|A path to an image.|`user`, `computer`|
+code|html|HTML code that should be executed.|`assistant`, `computer`|
+code|javascript|JavaScript code that should be executed.|`assistant`, `computer`|
+code|python|Python code that should be executed.|`assistant`|
+code|r|R code that should be executed.|`assistant`|
+code|applescript|AppleScript code that should be executed.|`assistant`|
+code|shell|Shell code that should be executed.|`assistant`|
+audio|wav|Audio in WAV format, used for the websocket server.|`user`|
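+
+For example, a hypothetical `computer` message carrying a screenshot as a PNG might look like this (the base64 content is truncated):
+
+```python
+# A computer result containing an image (base64 string truncated for illustration)
+{"role": "computer", "type": "image", "format": "base64.png", "content": "iVBORw0KGgoAAAANSUhEUgAA..."}
+```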
diff --git a/archive/classic_docs/safety/best-practices.mdx b/archive/classic_docs/safety/best-practices.mdx
new file mode 100644
index 0000000000..b5c6e2af2a
--- /dev/null
+++ b/archive/classic_docs/safety/best-practices.mdx
@@ -0,0 +1,17 @@
+---
+title: Best Practices
+---
+
+LLMs are not perfect. They can make mistakes, they can be tricked into doing things that they shouldn't, and they are capable of writing unsafe code. This page will help you understand how to use these LLMs safely.
+
+## Best Practices
+
+- Avoid asking it to perform potentially risky tasks. This seems obvious, but it's the number one way to prevent safety mishaps.
+
+- Run it in a sandbox. This is the safest way to run it, as it completely isolates the code it runs from the rest of your system.
+
+- Use trusted models. Yes, Open Interpreter can be configured to run pretty much any text-based model on Hugging Face. But that does not mean it's a good idea to run any random model you find. Make sure you trust the models you're using. If you're not sure, run it in a sandbox. Nefarious LLMs are becoming a real problem, and they are not going away anytime soon.
+
+- Local models are fun! But GPT-4 is probably your safest bet. OpenAI has aligned their models in a major way. It will outperform local models, and it will generally refuse to write unsafe code, as it truly understands that the code it writes could be run. It has a pretty good idea of what unsafe code looks like, and will refuse to run commands like `rm -rf /`, which would delete your entire disk, for example.
+
+- The [--safe_mode](/safety/safe-mode) argument is your friend. It enables code scanning, and can use [guarddog](https://github.com/DataDog/guarddog) to identify malicious PyPi and npm packages. It's not a perfect solution, but it's a great start.
diff --git a/archive/classic_docs/safety/introduction.mdx b/archive/classic_docs/safety/introduction.mdx
new file mode 100644
index 0000000000..46dc09415b
--- /dev/null
+++ b/archive/classic_docs/safety/introduction.mdx
@@ -0,0 +1,22 @@
+---
+title: Introduction
+---
+
+Safety is a top priority for us at Open Interpreter. Running LLM-generated code on your computer is inherently risky, and we have taken steps to make it as safe as possible. One of the primary safety mechanisms is the alignment of the LLM itself: GPT-4 refuses to run dangerous code like `rm -rf /` because it understands what that command will do, and it won't let you footgun yourself. This is less applicable when running local models like Mistral, which have little or no alignment, making our other safety measures more important.
+
+# Safety Measures
+
+- [Safe mode](/safety/safe-mode) enables code scanning, as well as the ability to scan packages with [guarddog](https://github.com/DataDog/guarddog) with a simple change to the system message. See the [safe mode docs](/safety/safe-mode) for more information.
+
+- Requiring confirmation with the user before the code is actually run. This is a simple measure that can prevent a lot of accidents. It exists as another layer of protection, but can be disabled with the `--auto-run` flag if you wish.
+
+- Sandboxing code execution. Open Interpreter can be run in a sandboxed environment using [Docker](/integrations/docker). This is a great way to run code without worrying about it affecting your system. Docker support is currently experimental, but we are working on making it a core feature of Open Interpreter. Another option for sandboxing is [E2B](https://e2b.dev/), which overrides the default python language with a sandboxed, hosted version of python through E2B. Follow [this guide](/integrations/e2b) to set it up.
+
+## Notice
+
+
+ Open Interpreter is not responsible for any damage caused by using the
+ package. These safety measures provide no guarantees of safety or security.
+ Please be careful when running code generated by Open Interpreter, and make
+ sure you understand what it will do before running it.
+
diff --git a/archive/classic_docs/safety/isolation.mdx b/archive/classic_docs/safety/isolation.mdx
new file mode 100644
index 0000000000..848fbb18ef
--- /dev/null
+++ b/archive/classic_docs/safety/isolation.mdx
@@ -0,0 +1,19 @@
+---
+title: Isolation
+---
+
+Isolating Open Interpreter from your system is helpful to prevent security mishaps. By running it in a separate process, you can ensure that actions taken by Open Interpreter will not directly affect your system. This is by far the safest way to run Open Interpreter, although it can be limiting based on your use case.
+
+If you wish to sandbox Open Interpreter, we have two primary methods of doing so: Docker and E2B.
+
+## Docker
+
+Docker is a containerization technology that allows you to run an isolated Linux environment on your system. This allows you to run Open Interpreter in a container, which **completely** isolates it from your system. All code execution is done in the container, and the container is not able to access your system. Docker support is currently experimental, and we are working on integrating it as a core feature of Open Interpreter.
+
+Follow [these instructions](/integrations/docker) to get it running.
+
+## E2B
+
+[E2B](https://e2b.dev/) is a cloud-based platform for running sandboxed code environments, designed for use by AI agents. You can override the default `python` language in Open Interpreter to use E2B, and it will automatically run the code in a cloud-sandboxed environment. You will need an E2B account to use this feature. It's worth noting that this will only sandbox python code, other languages like shell and JavaScript will still be run on your system.
+
+Follow [these instructions](/integrations/e2b) to get it running.
diff --git a/archive/classic_docs/safety/safe-mode.mdx b/archive/classic_docs/safety/safe-mode.mdx
new file mode 100644
index 0000000000..b90b804416
--- /dev/null
+++ b/archive/classic_docs/safety/safe-mode.mdx
@@ -0,0 +1,64 @@
+---
+title: Safe Mode
+---
+
+# Safe Mode
+
+**⚠️ Safe mode is experimental and does not provide any guarantees of safety or security.**
+
+Open Interpreter is working on providing an experimental safety toolkit to help you feel more confident running the code generated by Open Interpreter.
+
+Install Open Interpreter with the safety toolkit dependencies as part of the bundle:
+
+```shell
+pip install open-interpreter[safe]
+```
+
+Alternatively, you can install the safety toolkit dependencies separately in your virtual environment:
+
+```shell
+pip install semgrep
+```
+
+## Features
+
+- **No Auto Run**: Safe mode disables the ability to automatically execute code
+- **Code Scanning**: Scan generated code for vulnerabilities with [`semgrep`](https://semgrep.dev/)
+
+## Enabling Safe Mode
+
+You can enable safe mode by passing the `--safe` flag when invoking `interpreter` or by configuring `safe_mode` in your [config file](https://github.com/OpenInterpreter/open-interpreter#configuration).
+
+The safe mode setting has three options:
+
+- `off`: disables the safety toolkit (_default_)
+- `ask`: prompts you to confirm that you want to scan code
+- `auto`: automatically scans code
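+
+For example, to be prompted before each scan when running from the terminal:
+
+```shell
+interpreter --safe_mode ask
+```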
+
+### Example Config:
+
+```yaml
+model: gpt-4
+temperature: 0
+verbose: false
+safe_mode: ask
+```
+
+## Roadmap
+
+Some upcoming features that enable even more safety:
+
+- [Execute code in containers](https://github.com/OpenInterpreter/open-interpreter/pull/459)
+
+## Tips & Tricks
+
+You can adjust the `system_message` in your [config file](https://github.com/OpenInterpreter/open-interpreter#configuration) to include instructions for the model to scan packages with [guarddog](https://github.com/DataDog/guarddog) before installing them, as shown in the example below.
+
+```yaml
+model: gpt-4
+verbose: false
+safe_mode: ask
+system_message: |
+ # normal system message here
+ BEFORE INSTALLING ANY PACKAGES WITH pip OR npm YOU MUST SCAN THEM WITH `guarddog` FIRST. Run `guarddog pypi scan $package` for pip packages and `guarddog npm scan $package` for npm packages. `guarddog` only accepts one package name at a time.
+```
diff --git a/archive/classic_docs/server/usage.mdx b/archive/classic_docs/server/usage.mdx
new file mode 100644
index 0000000000..37c508ce3d
--- /dev/null
+++ b/archive/classic_docs/server/usage.mdx
@@ -0,0 +1,338 @@
+# Server Usage Guide
+
+## Starting the Server
+
+### From Command Line
+To start the server from the command line, use:
+
+```bash
+interpreter --server
+```
+
+### From Python
+To start the server from within a Python script:
+
+```python
+from interpreter import AsyncInterpreter
+
+async_interpreter = AsyncInterpreter()
+async_interpreter.server.run(port=8000) # Default port is 8000, but you can customize it
+```
+
+## WebSocket API
+
+### Establishing a Connection
+Connect to the WebSocket server at `ws://localhost:8000/`.
+
+### Message Format
+Open Interpreter uses an extended version of OpenAI's message format called [LMC messages](https://docs.openinterpreter.com/protocols/lmc-messages) that allow for rich, multi-part messages. **Messages must be sent between start and end flags.** Here's the basic structure:
+
+```json
+{"role": "user", "start": true}
+{"role": "user", "type": "message", "content": "Your message here"}
+{"role": "user", "end": true}
+```
+
+### Multi-part Messages
+You can send complex messages with multiple components:
+
+1. Start with `{"role": "user", "start": true}`
+2. Add various types of content (message, file, image, etc.)
+3. End with `{"role": "user", "end": true}`
+
+### Content Types
+You can include various types of content in your messages:
+
+- Text messages: `{"role": "user", "type": "message", "content": "Your text here"}`
+- File paths: `{"role": "user", "type": "file", "content": "path/to/file"}`
+- Images: `{"role": "user", "type": "image", "format": "path", "content": "path/to/photo"}`
+- Audio: `{"role": "user", "type": "audio", "format": "wav", "content": "path/to/audio.wav"}`
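+
+For example, a single multi-part message that combines text with an image (the path is illustrative) is sent as the following sequence:
+
+```json
+{"role": "user", "start": true}
+{"role": "user", "type": "message", "content": "What is in this image?"}
+{"role": "user", "type": "image", "format": "path", "content": "path/to/photo.jpg"}
+{"role": "user", "end": true}
+```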
+
+### Control Commands
+To control the server's behavior, send the following commands:
+
+1. Stop execution:
+ ```json
+ {"role": "user", "type": "command", "content": "stop"}
+ ```
+ This stops all execution and message processing.
+
+2. Execute code block:
+ ```json
+ {"role": "user", "type": "command", "content": "go"}
+ ```
+ This executes a generated code block and allows the agent to proceed.
+
+ **Note**: If `auto_run` is set to `False`, the agent will pause after generating code blocks. You must send the "go" command to continue execution.
+
+### Completion Status
+The server indicates completion with the following message:
+```json
+{"role": "server", "type": "status", "content": "complete"}
+```
+Ensure your client watches for this message to determine when the interaction is finished.
+
+### Error Handling
+If an error occurs, the server will send an error message in the following format:
+```json
+{"role": "server", "type": "error", "content": "Error traceback information"}
+```
+Your client should be prepared to handle these error messages appropriately.
+
+## Code Execution Review
+
+After code blocks are executed, you'll receive a review message:
+
+```json
+{
+ "role": "assistant",
+ "type": "review",
+ "content": "Review of the executed code, including safety assessment and potential irreversible actions."
+}
+```
+
+This review provides important information about the safety and potential impact of the executed code. Pay close attention to these messages, especially when dealing with operations that might have significant effects on your system.
+
+The `content` field of the review message may have two possible formats:
+
+1. If the code is deemed completely safe, the content will be exactly `""`.
+2. Otherwise, it will contain an explanation of why the code might be unsafe or have irreversible effects.
+
+Example of a safe code review:
+```json
+{
+ "role": "assistant",
+ "type": "review",
+ "content": ""
+}
+```
+
+Example of a potentially unsafe code review:
+```json
+{
+ "role": "assistant",
+ "type": "review",
+ "content": "This code performs file deletion operations which are irreversible. Please review carefully before proceeding."
+}
+```
+
+## Example WebSocket Interaction
+
+Here's an example demonstrating the WebSocket interaction:
+
+```python
+import websockets
+import json
+import asyncio
+
+async def websocket_interaction():
+ async with websockets.connect("ws://localhost:8000/") as websocket:
+ # Send a multi-part user message
+ await websocket.send(json.dumps({"role": "user", "start": True}))
+ await websocket.send(json.dumps({"role": "user", "type": "message", "content": "Analyze this image:"}))
+ await websocket.send(json.dumps({"role": "user", "type": "image", "format": "path", "content": "path/to/image.jpg"}))
+ await websocket.send(json.dumps({"role": "user", "end": True}))
+
+ # Receive and process messages
+ while True:
+ message = await websocket.recv()
+ data = json.loads(message)
+
+ if data.get("type") == "message":
+ print(f"Assistant: {data.get('content', '')}")
+ elif data.get("type") == "review":
+ print(f"Code Review: {data.get('content')}")
+ elif data.get("type") == "error":
+ print(f"Error: {data.get('content')}")
+            elif data == {"role": "server", "type": "status", "content": "complete"}:
+ print("Interaction complete")
+ break
+
+asyncio.run(websocket_interaction())
+```
+
+## HTTP API
+
+### Modifying Settings
+To change server settings, send a POST request to `http://localhost:8000/settings`. The payload should conform to [the interpreter object's settings](https://docs.openinterpreter.com/settings/all-settings).
+
+Example:
+```python
+import requests
+
+settings = {
+ "llm": {"model": "gpt-4"},
+ "custom_instructions": "You only write Python code.",
+ "auto_run": True,
+}
+response = requests.post("http://localhost:8000/settings", json=settings)
+print(response.status_code)
+```
+
+### Retrieving Settings
+To get current settings, send a GET request to `http://localhost:8000/settings/{property}`.
+
+Example:
+```python
+import requests
+
+response = requests.get("http://localhost:8000/settings/custom_instructions")
+print(response.json())
+# Output: {"custom_instructions": "You only write Python code."}
+```
+
+## OpenAI-Compatible Endpoint
+
+The server provides an OpenAI-compatible endpoint at `/openai`. This allows you to use the server with any tool or library that's designed to work with the OpenAI API.
+
+### Chat Completions Endpoint
+
+The chat completions endpoint is available at:
+
+```
+[server_url]/openai/chat/completions
+```
+
+To use this endpoint, set the `api_base` in your OpenAI client or configuration to `[server_url]/openai`. For example:
+
+```python
+import openai
+
+openai.api_base = "http://localhost:8000/openai" # Replace with your server URL if different
+openai.api_key = "dummy" # The key is not used but required by the OpenAI library
+
+response = openai.ChatCompletion.create(
+ model="gpt-3.5-turbo", # This model name is ignored, but required
+ messages=[
+ {"role": "system", "content": "You are a helpful assistant."},
+ {"role": "user", "content": "What's the capital of France?"}
+ ]
+)
+
+print(response.choices[0].message['content'])
+```
+
+Note that only the chat completions endpoint (`/chat/completions`) is implemented. Other OpenAI API endpoints are not available.
+
+When using this endpoint:
+- The `model` parameter is required but ignored.
+- The `api_key` is required by the OpenAI library but not used by the server.
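+
+If you're not using the OpenAI Python library, you can also hit the endpoint with a raw HTTP request. A minimal sketch, assuming the server is running locally on port 8000:
+
+```bash
+curl http://localhost:8000/openai/chat/completions \
+  -H "Content-Type: application/json" \
+  -d '{
+    "model": "gpt-3.5-turbo",
+    "messages": [{"role": "user", "content": "What is the capital of France?"}]
+  }'
+```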
+
+## Using Docker
+
+You can also run the server using Docker. First, build the Docker image from the root of the repository:
+
+```bash
+docker build -t open-interpreter .
+```
+
+Then, run the container:
+
+```bash
+docker run -p 8000:8000 open-interpreter
+```
+
+This will expose the server on port 8000 of your host machine.
+
+## Acknowledgment Feature
+
+When the `INTERPRETER_REQUIRE_ACKNOWLEDGE` environment variable is set to `"True"`, the server requires clients to acknowledge each message received. This feature ensures reliable message delivery in environments where network stability might be a concern.
+
+### How it works
+
+1. When this feature is enabled, each message sent by the server will include an `id` field.
+2. The client must send an acknowledgment message back to the server for each received message.
+3. The server will wait for this acknowledgment before sending the next message.
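+
+For example, a server message and the client's acknowledgment might look like this (the `id` value is illustrative):
+
+```json
+{"role": "assistant", "type": "message", "content": "Hello!", "id": "abc123"}
+{"ack": "abc123"}
+```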
+
+### Client Implementation
+
+To implement this on the client side:
+
+1. Check if each received message contains an `id` field.
+2. If an `id` is present, send an acknowledgment message back to the server.
+
+Here's an example of how to handle this in your WebSocket client:
+
+```python
+import json
+import websockets
+
+async def handle_messages(websocket):
+ async for message in websocket:
+ data = json.loads(message)
+
+ # Process the message as usual
+ print(f"Received: {data}")
+
+ # Check if the message has an ID that needs to be acknowledged
+ if "id" in data:
+ ack_message = {
+ "ack": data["id"]
+ }
+ await websocket.send(json.dumps(ack_message))
+ print(f"Sent acknowledgment for message {data['id']}")
+
+async def main():
+ uri = "ws://localhost:8000"
+ async with websockets.connect(uri) as websocket:
+ await handle_messages(websocket)
+
+# Run the async function
+import asyncio
+asyncio.run(main())
+```
+
+### Server Behavior
+
+- If the server doesn't receive an acknowledgment within a certain timeframe, it will attempt to resend the message.
+- The server will make multiple attempts to send a message before considering it failed.
+
+### Enabling the Feature
+
+To enable this feature, set the `INTERPRETER_REQUIRE_ACKNOWLEDGE` environment variable to `"True"` before starting the server:
+
+```bash
+export INTERPRETER_REQUIRE_ACKNOWLEDGE="True"
+interpreter --server
+```
+
+Or in Python:
+
+```python
+import os
+os.environ["INTERPRETER_REQUIRE_ACKNOWLEDGE"] = "True"
+
+from interpreter import AsyncInterpreter
+async_interpreter = AsyncInterpreter()
+async_interpreter.server.run()
+```
+
+## Advanced Usage: Accessing the FastAPI App Directly
+
+The FastAPI app is exposed at `async_interpreter.server.app`. This allows you to add custom routes or host the app using Uvicorn directly.
+
+Example of adding a custom route and hosting with Uvicorn:
+
+```python
+from interpreter import AsyncInterpreter
+from fastapi import FastAPI
+import uvicorn
+
+async_interpreter = AsyncInterpreter()
+app = async_interpreter.server.app
+
+@app.get("/custom")
+async def custom_route():
+ return {"message": "This is a custom route"}
+
+if __name__ == "__main__":
+ uvicorn.run(app, host="0.0.0.0", port=8000)
+```
+
+## Best Practices
+
+1. Always handle the "complete" status message to ensure your client knows when the server has finished processing.
+2. If `auto_run` is set to `False`, remember to send the "go" command to execute code blocks and continue the interaction.
+3. Implement proper error handling in your client to manage potential connection issues, unexpected server responses, or server-sent error messages.
+4. Use the AsyncInterpreter class when working with the server in Python to ensure compatibility with asynchronous operations.
+5. Pay attention to the code execution review messages for important safety and operational information.
+6. Utilize the multi-part user message structure for complex inputs, including file paths and images.
+7. When sending file paths or image paths, ensure they are accessible to the server.
\ No newline at end of file
diff --git a/archive/classic_docs/settings/all-settings.mdx b/archive/classic_docs/settings/all-settings.mdx
new file mode 100644
index 0000000000..7fe06b8c2b
--- /dev/null
+++ b/archive/classic_docs/settings/all-settings.mdx
@@ -0,0 +1,804 @@
+---
+title: All Settings
+---
+
+
+
+
+ Set your `model`, `api_key`, `temperature`, etc.
+
+
+
+ Change your `system_message`, set your interpreter to run `offline`, etc.
+
+
+ Modify the `interpreter.computer`, which handles code execution.
+
+
+
+
+# Language Model
+
+### Model Selection
+
+Specifies which language model to use. Check out the [models](/language-models/) section for a list of available models. Open Interpreter uses [LiteLLM](https://github.com/BerriAI/litellm) under the hood to support over 100 models.
+
+
+
+```bash Terminal
+interpreter --model "gpt-3.5-turbo"
+```
+
+```python Python
+interpreter.llm.model = "gpt-3.5-turbo"
+```
+
+```yaml Profile
+llm:
+ model: gpt-3.5-turbo
+```
+
+
+
+### Temperature
+
+Sets the randomness level of the model's output. The default temperature is 0; you can set it to any value between 0 and 1. The higher the temperature, the more random and creative the output will be.
+
+
+
+```bash Terminal
+interpreter --temperature 0.7
+```
+
+```python Python
+interpreter.llm.temperature = 0.7
+```
+
+```yaml Profile
+llm:
+ temperature: 0.7
+```
+
+
+
+### Context Window
+
+Manually set the context window size in tokens for the model. For local models, using a smaller context window will use less RAM, which is more suitable for most devices.
+
+
+
+```bash Terminal
+interpreter --context_window 16000
+```
+
+```python Python
+interpreter.llm.context_window = 16000
+```
+
+```yaml Profile
+llm:
+ context_window: 16000
+```
+
+
+
+### Max Tokens
+
+Sets the maximum number of tokens that the model can generate in a single response.
+
+
+
+```bash Terminal
+interpreter --max_tokens 100
+```
+
+```python Python
+interpreter.llm.max_tokens = 100
+```
+
+```yaml Profile
+llm:
+ max_tokens: 100
+```
+
+
+
+### Max Output
+
+Set the maximum number of characters for code outputs.
+
+
+
+```bash Terminal
+interpreter --max_output 1000
+```
+
+```python Python
+interpreter.llm.max_output = 1000
+```
+
+```yaml Profile
+llm:
+ max_output: 1000
+```
+
+
+
+### API Base
+
+If you are using a custom API, specify its base URL with this argument.
+
+
+
+```bash Terminal
+interpreter --api_base "https://api.example.com"
+```
+
+```python Python
+interpreter.llm.api_base = "https://api.example.com"
+```
+
+```yaml Profile
+llm:
+ api_base: https://api.example.com
+```
+
+
+
+### API Key
+
+Set your API key for authentication when making API calls. For OpenAI models, you can get your API key [here](https://platform.openai.com/api-keys).
+
+
+
+```bash Terminal
+interpreter --api_key "your_api_key_here"
+```
+
+```python Python
+interpreter.llm.api_key = "your_api_key_here"
+```
+
+```yaml Profile
+llm:
+ api_key: your_api_key_here
+```
+
+
+
+### API Version
+
+Optionally set the API version to use with your selected model. (This will override environment variables)
+
+
+
+```bash Terminal
+interpreter --api_version 2.0.2
+```
+
+```python Python
+interpreter.llm.api_version = '2.0.2'
+```
+
+```yaml Profile
+llm:
+ api_version: 2.0.2
+```
+
+
+
+### LLM Supports Functions
+
+Inform Open Interpreter that the language model you're using supports function calling.
+
+
+
+```bash Terminal
+interpreter --llm_supports_functions
+```
+
+```python Python
+interpreter.llm.supports_functions = True
+```
+
+```yaml Profile
+llm:
+ supports_functions: true
+```
+
+
+
+### LLM Does Not Support Functions
+
+Inform Open Interpreter that the language model you're using does not support function calling.
+
+
+
+```bash Terminal
+interpreter --no-llm_supports_functions
+```
+
+```python Python
+interpreter.llm.supports_functions = False
+```
+
+```yaml Profile
+llm:
+ supports_functions: false
+```
+
+
+
+### Execution Instructions
+
+If `llm.supports_functions` is `False`, this value will be added to the system message. This parameter tells language models how to execute code. This can be set to an empty string or to `False` if you don't want to tell the LLM how to do this.
+
+
+
+````python Python
+interpreter.llm.execution_instructions = "To execute code on the user's machine, write a markdown code block. Specify the language after the ```. You will receive the output. Use any programming language."
+````
+
+````python Profile
+interpreter.llm.execution_instructions = "To execute code on the user's machine, write a markdown code block. Specify the language after the ```. You will receive the output. Use any programming language."
+````
+
+
+
+### LLM Supports Vision
+
+Inform Open Interpreter that the language model you're using supports vision. Defaults to `False`.
+
+
+
+```bash Terminal
+interpreter --llm_supports_vision
+```
+
+```python Python
+interpreter.llm.supports_vision = True
+```
+
+```yaml Profile
+llm:
+ supports_vision: true
+```
+
+
+
+# Interpreter
+
+### Vision Mode
+
+Enables vision mode, which adds some special instructions to the prompt and switches to `gpt-4o`.
+
+
+```bash Terminal
+interpreter --vision
+```
+
+```python Python
+interpreter.llm.model = "gpt-4o" # Any vision supporting model
+interpreter.llm.supports_vision = True
+interpreter.llm.supports_functions = True
+
+interpreter.custom_instructions = """The user will show you an image of the code you write. You can view images directly.
+For HTML: This will be run STATELESSLY. You may NEVER write '' or `` or anything like that. It is CRITICAL TO NEVER WRITE PLACEHOLDERS. Placeholders will BREAK it. You must write the FULL HTML CODE EVERY TIME. Therefore you cannot write HTML piecemeal—write all the HTML, CSS, and possibly Javascript **in one step, in one code block**. The user will help you review it visually.
+If the user submits a filepath, you will also see the image. The filepath and user image will both be in the user's message.
+If you use `plt.show()`, the resulting image will be sent to you. However, if you use `PIL.Image.show()`, the resulting image will NOT be sent to you."""
+```
+
+```yaml Profile
+loop: True
+
+llm:
+ model: "gpt-4o"
+ temperature: 0
+ supports_vision: True
+ supports_functions: True
+ context_window: 110000
+ max_tokens: 4096
+ custom_instructions: >
+ The user will show you an image of the code you write. You can view images directly.
+ For HTML: This will be run STATELESSLY. You may NEVER write '' or `` or anything like that. It is CRITICAL TO NEVER WRITE PLACEHOLDERS. Placeholders will BREAK it. You must write the FULL HTML CODE EVERY TIME. Therefore you cannot write HTML piecemeal—write all the HTML, CSS, and possibly Javascript **in one step, in one code block**. The user will help you review it visually.
+ If the user submits a filepath, you will also see the image. The filepath and user image will both be in the user's message.
+ If you use `plt.show()`, the resulting image will be sent to you. However, if you use `PIL.Image.show()`, the resulting image will NOT be sent to you.
+```
+
+
+
+### OS Mode
+
+Enables OS mode for multimodal models. Currently not available in Python. Check out more information on OS mode [here](/guides/os-mode).
+
+
+
+```bash Terminal
+interpreter --os
+```
+
+```yaml Profile
+os: true
+```
+
+
+
+### Version
+
+Get the current installed version number of Open Interpreter.
+
+
+
+```bash Terminal
+interpreter --version
+```
+
+
+
+### Open Local Models Directory
+
+Opens the models directory. All downloaded Llamafiles are saved here.
+
+
+
+```bash Terminal
+interpreter --local_models
+```
+
+
+
+### Open Profiles Directory
+
+Opens the profiles directory. New yaml profile files can be added to this directory.
+
+
+
+```bash Terminal
+interpreter --profiles
+```
+
+
+
+### Select Profile
+
+Select a profile to use. If no profile is specified, the default profile will be used.
+
+
+
+```bash Terminal
+interpreter --profile local.yaml
+```
+
+
+
+### Help
+
+Display all available terminal arguments.
+
+
+
+```bash Terminal
+interpreter --help
+```
+
+
+
+### Loop (Force Task Completion)
+
+Runs Open Interpreter in a loop, requiring it to admit to completing or failing every task.
+
+
+
+```bash Terminal
+interpreter --loop
+```
+
+```python Python
+interpreter.loop = True
+```
+
+```yaml Profile
+loop: true
+```
+
+
+
+### Verbose
+
+Run the interpreter in verbose mode. Debug information will be printed at each step to help diagnose issues.
+
+
+
+```bash Terminal
+interpreter --verbose
+```
+
+```python Python
+interpreter.verbose = True
+```
+
+```yaml Profile
+verbose: true
+```
+
+
+
+### Safe Mode
+
+Enable or disable experimental safety mechanisms like code scanning. Valid options are `off`, `ask`, and `auto`.
+
+
+
+```bash Terminal
+interpreter --safe_mode ask
+```
+
+```python Python
+interpreter.safe_mode = 'ask'
+```
+
+```yaml Profile
+safe_mode: ask
+```
+
+
+
+### Auto Run
+
+Automatically run the interpreter without requiring user confirmation.
+
+
+
+```bash Terminal
+interpreter --auto_run
+```
+
+```python Python
+interpreter.auto_run = True
+```
+
+```yaml Profile
+auto_run: true
+```
+
+
+
+### Max Budget
+
+Sets the maximum budget limit for the session in USD.
+
+
+
+```bash Terminal
+interpreter --max_budget 0.01
+```
+
+```python Python
+interpreter.max_budget = 0.01
+```
+
+```yaml Profile
+max_budget: 0.01
+```
+
+
+
+### Local Mode
+
+Run the model locally. Check the [models page](/language-models/local-models/lm-studio) for more information.
+
+
+
+```bash Terminal
+interpreter --local
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.offline = True # Disables online features like Open Procedures
+interpreter.llm.model = "openai/x" # Tells OI to send messages in OpenAI's format
+interpreter.llm.api_key = "fake_key" # LiteLLM, which we use to talk to local models, requires this
+interpreter.llm.api_base = "http://localhost:1234/v1" # Point this at any OpenAI compatible server
+
+interpreter.chat()
+```
+
+```yaml Profile
+local: true
+```
+
+
+
+### Fast Mode
+
+Sets the model to gpt-3.5-turbo and encourages it to only write code without confirmation.
+
+
+
+```bash Terminal
+interpreter --fast
+```
+
+```yaml Profile
+fast: true
+```
+
+
+
+### Custom Instructions
+
+Appends custom instructions to the system message. This is useful for adding information about your system, preferred languages, etc.
+
+
+
+```bash Terminal
+interpreter --custom_instructions "This is a custom instruction."
+```
+
+```python Python
+interpreter.custom_instructions = "This is a custom instruction."
+```
+
+```yaml Profile
+custom_instructions: "This is a custom instruction."
+```
+
+
+
+### System Message
+
+We don't recommend modifying the system message, as doing so opts you out of future updates to the core system message. Use `--custom_instructions` instead, to add relevant information to the system message. If you must modify the system message, you can do so by using this argument, or by changing a profile file.
+
+
+
+```bash Terminal
+interpreter --system_message "You are Open Interpreter..."
+```
+
+```python Python
+interpreter.system_message = "You are Open Interpreter..."
+```
+
+```yaml Profile
+system_message: "You are Open Interpreter..."
+```
+
+
+
+### Disable Telemetry
+
+Opt out of [telemetry](telemetry/telemetry).
+
+
+
+```bash Terminal
+interpreter --disable_telemetry
+```
+
+```python Python
+interpreter.anonymized_telemetry = False
+```
+
+```yaml Profile
+disable_telemetry: true
+```
+
+
+
+### Offline
+
+This boolean flag determines whether to disable some online features like [open procedures](https://open-procedures.replit.app/). Use this in conjunction with the `model` parameter to set your language model.
+
+
+
+```python Python
+interpreter.offline = True
+```
+
+```bash Terminal
+interpreter --offline true
+```
+
+```yaml Profile
+offline: true
+```
+
+
+
+### Messages
+
+This property holds a list of `messages` between the user and the interpreter.
+
+You can use it to restore a conversation:
+
+```python
+interpreter.chat("Hi! Can you print hello world?")
+
+print(interpreter.messages)
+
+# This would output:
+
+# [
+# {
+# "role": "user",
+# "message": "Hi! Can you print hello world?"
+# },
+# {
+# "role": "assistant",
+# "message": "Sure!"
+# },
+# {
+# "role": "assistant",
+# "language": "python",
+# "code": "print('Hello, World!')",
+# "output": "Hello, World!"
+# }
+# ]
+
+# You can use this to restore `interpreter` to a previous conversation.
+interpreter.messages = messages # A list that resembles the one above
+```
+
+### User Message Template
+
+A template applied to the User's message. `{content}` will be replaced with the user's message, then sent to the language model.
+
+
+
+````python Python
+interpreter.user_message_template = "{content} Please send me some code that would be able to answer my question, in the form of ```python\n... the code ...\n``` or ```shell\n... the code ...\n```"
+````
+
+```python Profile
+interpreter.user_message_template = "{content}. Be concise, don't include anything unnecessary. Don't use placeholders, I can't edit code."
+```
+
+
+
+### Always Apply User Message Template
+
+A boolean flag that determines whether the User Message Template is applied to every user message. The default is `False`, which means the template is only applied to the last user message.
+
+
+
+```python Python
+interpreter.always_apply_user_message_template = False
+```
+
+```python Profile
+interpreter.always_apply_user_message_template = False
+```
+
+
+
+### Code Message Template
+
+A template applied to the Computer's output after running code. `{content}` will be replaced with the computer's output, then sent to the language model.
+
+
+
+```python Python
+interpreter.code_output_template = "Code output: {content}\nWhat does this output mean / what's next (if anything, or are we done)?"
+```
+
+```python Profile
+interpreter.code_output_template = "Code output: {content}\nWhat code needs to be run next?"
+```
+
+
+
+### Empty Code Message Template
+
+If the computer does not output anything after code execution, this value will be sent to the language model.
+
+
+
+```python Python
+interpreter.empty_code_output_template = "The code above was executed on my machine. It produced no text output. what's next (if anything, or are we done?)"
+```
+
+```python Profile
+interpreter.empty_code_output_template = "The code above was executed on my machine. It produced no text output. what's next?"
+```
+
+
+
+### Code Output Sender
+
+This field determines whether the computer / code output messages are sent as the assistant or as the user. The default is user.
+
+
+
+```python Python
+interpreter.code_output_sender = "user"
+```
+
+```python Profile
+interpreter.code_output_sender = "assistant"
+```
+
+
+
+# Computer
+
+The `computer` object in `interpreter.computer` is a virtual computer that the AI controls. Its primary interface/function is to execute code and return the output in real-time.
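+
+For example, you can drive it directly from Python. A minimal sketch, assuming the classic `computer.run(language, code)` interface:
+
+```python
+from interpreter import interpreter
+
+# Run a snippet on the virtual computer; run() is assumed to return the
+# output messages produced by the execution.
+output = interpreter.computer.run("python", "print('Hello from the computer!')")
+print(output)
+```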
+
+### Offline
+
+Running the `computer` in offline mode will disable some online features, like the hosted [Computer API](https://api.openinterpreter.com/). Inherits from `interpreter.offline`.
+
+
+
+```python Python
+interpreter.computer.offline = True
+```
+
+```yaml Profile
+computer.offline: True
+```
+
+
+
+### Verbose
+
+This is primarily used for debugging `interpreter.computer`. Inherits from `interpreter.verbose`.
+
+
+
+```python Python
+interpreter.computer.verbose = True
+```
+
+```yaml Profile
+computer.verbose: True
+```
+
+
+
+### Emit Images
+
+The `emit_images` attribute in `interpreter.computer` controls whether the computer should emit images or not. This is inherited from `interpreter.llm.supports_vision`.
+
+This is used to distinguish multimodal from text-only models. Running `computer.display.view()` will return an actual screenshot for multimodal models if `emit_images` is `True`. If it's `False`, `computer.display.view()` will return all the text on the screen.
+
+Many other functions of the computer can produce image/text outputs, and this parameter controls that.
+
+
+
+```python Python
+interpreter.computer.emit_images = True
+```
+
+```yaml Profile
+computer.emit_images: True
+```
+
+
+
+### Import Computer API
+
+Include the Computer API in the system message. The default is `False`, which means the Computer API will not be imported automatically.
+
+
+
+```python Python
+interpreter.computer.import_computer_api = True
+```
+
+```yaml Profile
+computer.import_computer_api: True
+```
+
+
diff --git a/archive/classic_docs/settings/example-profiles.mdx b/archive/classic_docs/settings/example-profiles.mdx
new file mode 100644
index 0000000000..f35cc82c84
--- /dev/null
+++ b/archive/classic_docs/settings/example-profiles.mdx
@@ -0,0 +1,10 @@
+---
+title: Example Profiles
+---
+
+### OS Mode
+
+```yaml
+os: True
+custom_instructions: "Always use Safari as the browser, and use Raycast instead of spotlight search by pressing option + space."
+```
diff --git a/archive/classic_docs/settings/profiles.mdx b/archive/classic_docs/settings/profiles.mdx
new file mode 100644
index 0000000000..f45bd254e4
--- /dev/null
+++ b/archive/classic_docs/settings/profiles.mdx
@@ -0,0 +1,32 @@
+---
+title: Profiles
+---
+
+Profiles are preconfigured settings for Open Interpreter that make it easy to get going quickly with a specific set of settings. Any [setting](/settings/all-settings) can be configured in a profile. Custom instructions are helpful to have in each profile, to customize the behavior of Open Interpreter for the specific use case that the profile is designed for.
+
+To load a profile, run:
+
+```bash
+interpreter --profile <name_of_profile>.yaml
+```
+
+All profiles are stored in their own folder, which can be accessed by running:
+
+```bash
+interpreter --profiles
+```
+
+To create your own profile, you can add a `.yaml` file to this folder and add whatever [settings](/settings/all-settings) you'd like:
+
+```yaml
+custom_instructions: "Always use python, and be as concise as possible"
+llm.model: gpt-4
+llm.temperature: 0.5
+# Any other settings you'd like to add
+```
+
+Any profile named 'default.yaml' will be loaded by default.
+
+Profiles can be shared with others by sending them the profile yaml file!
diff --git a/archive/classic_docs/style.css b/archive/classic_docs/style.css
new file mode 100644
index 0000000000..5604b4cf6f
--- /dev/null
+++ b/archive/classic_docs/style.css
@@ -0,0 +1,28 @@
+.rounded-lg {
+ border-radius: 0;
+}
+
+/*
+
+.rounded-sm, .rounded-md, .rounded-lg, .rounded-xl, .rounded-2xl, .rounded-3xl {
+ border-radius: 0.125rem;
+}
+
+.rounded-full {
+ border-radius: 0.125rem;
+}
+
+*/
+
+.font-extrabold {
+ font-weight: 600;
+}
+
+.h1, .h2, .h3, .h4, .h5, .h6 {
+ font-weight: 600;
+}
+
+.body {
+ font-weight: normal;
+}
+
diff --git a/archive/classic_docs/telemetry/telemetry.mdx b/archive/classic_docs/telemetry/telemetry.mdx
new file mode 100644
index 0000000000..5b60ec6f49
--- /dev/null
+++ b/archive/classic_docs/telemetry/telemetry.mdx
@@ -0,0 +1,68 @@
+---
+title: Introduction
+---
+
+Open Interpreter contains a telemetry feature that collects **anonymous** usage information.
+
+We use this information to help us understand how OI is used, to help us prioritize work on new features and bug fixes, and to help us improve OI's performance and stability.
+
+# Opting out
+
+If you prefer to opt out of telemetry, you can do this in two ways.
+
+### Python
+
+Set `disable_telemetry` to `True` on the `interpreter` object:
+
+```python
+from interpreter import interpreter
+interpreter.disable_telemetry = True
+```
+
+### Terminal
+
+Use the `--disable_telemetry` flag:
+
+```shell
+interpreter --disable_telemetry
+```
+
+### Profile
+
+Set `disable_telemetry` to `true`. This will persist to future terminal sessions:
+
+```yaml
+disable_telemetry: true
+```
+
+### Environment Variables
+
+Set `DISABLE_TELEMETRY` to `true` in your shell or server environment.
+
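+For example, in a POSIX shell:
+
+```shell
+export DISABLE_TELEMETRY=true
+```
+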
+If you are running Open Interpreter on your local computer with `docker-compose` you can set this value in an `.env` file placed in the same directory as the `docker-compose.yml` file:
+
+```
+DISABLE_TELEMETRY=true
+```
+
+# What do you track?
+
+We will only track usage details that help us make product decisions, specifically:
+
+- Open Interpreter version and environment (i.e. whether it's running in Python or a terminal)
+- When `interpreter.chat` is run, in what mode (e.g. `--os` mode), and the type of the message being passed in (e.g. `None`, `str`, or `list`)
+- Exceptions that occur within Open Interpreter (not tracebacks)
+
+We **do not** collect personally identifiable or sensitive information, such as usernames, file names, environment variables, or hostnames of systems being tested.
+
+To view the list of events we track, you may reference the **[code](https://github.com/OpenInterpreter/open-interpreter/tree/main/interpreter/core)**
+
+## Where is telemetry information stored?
+
+We use **[Posthog](https://posthog.com/)** to store and visualize telemetry data.
+
+
+ Posthog is an open source platform for product analytics. Learn more about
+ Posthog on **[posthog.com](https://posthog.com/)** or
+ **[github.com/posthog](https://github.com/posthog/posthog)**
+
diff --git a/archive/classic_docs/troubleshooting/faq.mdx b/archive/classic_docs/troubleshooting/faq.mdx
new file mode 100644
index 0000000000..9dd2170055
--- /dev/null
+++ b/archive/classic_docs/troubleshooting/faq.mdx
@@ -0,0 +1,16 @@
+---
+title: "FAQ"
+description: "Frequently Asked Questions"
+---
+
+
+ As long as you're using a local language model, your messages / personal info
+ won't leave your computer. If you use a cloud model, we send your messages +
+ custom instructions to the model. We also have a basic telemetry
+ [function](https://github.com/OpenInterpreter/open-interpreter/blob/main/interpreter/core/core.py#L167)
+ (copied over from ChromaDB's telemetry) that anonymously tracks usage. It
+ only lets us know that a message was sent, and includes no PII. OI errors
+ are also reported, which includes the exception string. Detailed docs on
+ all of this are [here](/telemetry/telemetry), and you can opt out by
+ running with `--local`, `--offline`, or `--disable_telemetry`.
+
diff --git a/archive/classic_docs/usage/desktop/help.md b/archive/classic_docs/usage/desktop/help.md
new file mode 100644
index 0000000000..1cccfcab69
--- /dev/null
+++ b/archive/classic_docs/usage/desktop/help.md
@@ -0,0 +1 @@
+Reach out to help@openinterpreter.com for support.
diff --git a/archive/classic_docs/usage/desktop/install.mdx b/archive/classic_docs/usage/desktop/install.mdx
new file mode 100644
index 0000000000..988321eb13
--- /dev/null
+++ b/archive/classic_docs/usage/desktop/install.mdx
@@ -0,0 +1,7 @@
+---
+title: Desktop App
+---
+
+Our desktop application is currently in development and is not yet available to the public.
+
+You can apply for early access [here](https://0ggfznkwh4j.typeform.com/to/G21i9lJ2?typeform-source=docs.openinterpreter.com).
diff --git a/archive/classic_docs/usage/examples.mdx b/archive/classic_docs/usage/examples.mdx
new file mode 100644
index 0000000000..311b352ecc
--- /dev/null
+++ b/archive/classic_docs/usage/examples.mdx
@@ -0,0 +1,154 @@
+---
+title: Examples
+description: Get started by copying these code snippets into your terminal, a `.py` file, or a Jupyter notebook.
+---
+
+
+
+
+ Try Open Interpreter without installing anything on your computer
+
+
+
+ An example implementation of Open Interpreter's streaming capabilities
+
+
+
+
+---
+
+### Interactive Chat
+
+To start an interactive chat in your terminal, either run `interpreter` from the command line:
+
+```shell
+interpreter
+```
+
+Or `interpreter.chat()` from a .py file:
+
+```python
+interpreter.chat()
+```
+
+---
+
+### Programmatic Chat
+
+For more precise control, you can pass messages directly to `.chat(message)` in Python:
+
+```python
+interpreter.chat("Add subtitles to all videos in /videos.")
+
+# ... Displays output in your terminal, completes task ...
+
+interpreter.chat("These look great but can you make the subtitles bigger?")
+
+# ...
+```
+
+---
+
+### Start a New Chat
+
+In your terminal, Open Interpreter behaves like ChatGPT and will not remember previous conversations. Simply run `interpreter` to start a new chat:
+
+```shell
+interpreter
+```
+
+In Python, Open Interpreter remembers conversation history. If you want to start fresh, you can reset it:
+
+```python
+interpreter.messages = []
+```
+
+---
+
+### Save and Restore Chats
+
+In your terminal, Open Interpreter will save previous conversations to `/Open Interpreter/conversations/`.
+
+You can resume any of them by running `--conversations`. Use your arrow keys to select one, then press `ENTER` to resume it.
+
+```shell
+interpreter --conversations
+```
+
+In Python, `interpreter.chat()` returns a List of messages, which can be used to resume a conversation with `interpreter.messages = messages`:
+
+```python
+# Save messages to 'messages'
+messages = interpreter.chat("My name is Killian.")
+
+# Reset interpreter ("Killian" will be forgotten)
+interpreter.messages = []
+
+# Resume chat from 'messages' ("Killian" will be remembered)
+interpreter.messages = messages
+```
+
+---
+
+### Configure Default Settings
+
+We save default settings to a profile which can be edited by running the following command:
+
+```shell
+interpreter --profiles
+```
+
+You can use this to set your default language model, system message (custom instructions), max budget, etc.
+
+
+ **Note:** The Python library will also inherit settings from the default
+ profile file. You can change it by running `interpreter --profiles` and
+ editing `default.yaml`.
+
+
+---
+
+### Customize System Message
+
+In your terminal, modify the system message by [editing your configuration file as described here](#configure-default-settings).
+
+In Python, you can inspect and configure Open Interpreter's system message to extend its functionality, modify permissions, or give it more context.
+
+```python
+interpreter.system_message += """
+Run shell commands with -y so the user doesn't have to confirm them.
+"""
+print(interpreter.system_message)
+```
+
+---
+
+### Change your Language Model
+
+Open Interpreter uses [LiteLLM](https://docs.litellm.ai/docs/providers/) to connect to language models.
+
+You can change the model by setting the model parameter:
+
+```shell
+interpreter --model gpt-3.5-turbo
+interpreter --model claude-2
+interpreter --model command-nightly
+```
+
+In Python, set the model on the object:
+
+```python
+interpreter.llm.model = "gpt-3.5-turbo"
+```
+
+[Find the appropriate "model" string for your language model here.](https://docs.litellm.ai/docs/providers/)
diff --git a/archive/classic_docs/usage/python/arguments.mdx b/archive/classic_docs/usage/python/arguments.mdx
new file mode 100644
index 0000000000..2ce39ca99c
--- /dev/null
+++ b/archive/classic_docs/usage/python/arguments.mdx
@@ -0,0 +1,209 @@
+---
+title: Arguments
+---
+
+
+ Learn how to build Open Interpreter into your application.
+
+
+#### `messages`
+
+This property holds a list of `messages` between the user and the interpreter.
+
+You can use it to restore a conversation:
+
+```python
+interpreter.chat("Hi! Can you print hello world?")
+
+print(interpreter.messages)
+
+# This would output:
+
+[
+ {
+ "role": "user",
+ "message": "Hi! Can you print hello world?"
+ },
+ {
+ "role": "assistant",
+ "message": "Sure!"
+  },
+ {
+ "role": "assistant",
+ "language": "python",
+ "code": "print('Hello, World!')",
+ "output": "Hello, World!"
+ }
+]
+```
+
+You can use this to restore `interpreter` to a previous conversation.
+
+```python
+interpreter.messages = messages # A list that resembles the one above
+```
+
+---
+
+#### `offline`
+
+This replaced `interpreter.local` in the New Computer Update (`0.2.0`).
+
+This boolean flag determines whether to disable some online features like [open procedures](https://open-procedures.replit.app/).
+
+```python
+interpreter.offline = True # Don't check for updates, don't use procedures
+interpreter.offline = False # Check for updates, use procedures
+```
+
+Use this in conjunction with the `model` parameter to set your language model.
+
+---
+
+#### `auto_run`
+
+Setting this flag to `True` allows Open Interpreter to automatically run the generated code without user confirmation.
+
+```python
+interpreter.auto_run = True # Don't require user confirmation
+interpreter.auto_run = False # Require user confirmation (default)
+```
+
+---
+
+#### `verbose`
+
+Use this boolean flag to toggle verbose mode on or off. Verbose mode will print information at every step to help diagnose problems.
+
+```python
+interpreter.verbose = True # Turns on verbose mode
+interpreter.verbose = False # Turns off verbose mode
+```
+
+---
+
+#### `max_output`
+
+This property sets the maximum number of characters for code outputs.
+
+```python
+interpreter.max_output = 2000
+```
+
+---
+
+#### `conversation_history`
+
+A boolean flag to indicate if the conversation history should be stored or not.
+
+```python
+interpreter.conversation_history = True # To store history
+interpreter.conversation_history = False # To not store history
+```
+
+---
+
+#### `conversation_filename`
+
+This property sets the filename where the conversation history will be stored.
+
+```python
+interpreter.conversation_filename = "my_conversation.json"
+```
+
+---
+
+#### `conversation_history_path`
+
+You can set the path where the conversation history will be stored.
+
+```python
+import os
+interpreter.conversation_history_path = os.path.join("my_folder", "conversations")
+```
+
+---
+
+#### `model`
+
+Specifies the language model to be used.
+
+```python
+interpreter.llm.model = "gpt-3.5-turbo"
+```
+
+---
+
+#### `temperature`
+
+Sets the randomness level of the model's output.
+
+```python
+interpreter.llm.temperature = 0.7
+```
+
+---
+
+#### `system_message`
+
+This stores the model's system message as a string. Explore or modify it:
+
+```python
+interpreter.system_message += "\nRun all shell commands with -y."
+```
+
+---
+
+#### `context_window`
+
+This manually sets the context window size in tokens.
+
+We try to guess the right context window size for your model, but you can override it with this parameter.
+
+```python
+interpreter.llm.context_window = 16000
+```
+
+---
+
+#### `max_tokens`
+
+Sets the maximum number of tokens the model can generate in a single response.
+
+```python
+interpreter.llm.max_tokens = 100
+```
+
+---
+
+#### `api_base`
+
+If you are using a custom API, you can specify its base URL here.
+
+```python
+interpreter.llm.api_base = "https://api.example.com"
+```
+
+---
+
+#### `api_key`
+
+Set your API key for authentication.
+
+```python
+interpreter.llm.api_key = "your_api_key_here"
+```
+
+---
+
+#### `max_budget`
+
+This property sets the maximum budget limit for the session in USD.
+
+```python
+interpreter.max_budget = 0.01 # 1 cent
+```
diff --git a/archive/classic_docs/usage/python/budget-manager.mdx b/archive/classic_docs/usage/python/budget-manager.mdx
new file mode 100644
index 0000000000..e39762df9a
--- /dev/null
+++ b/archive/classic_docs/usage/python/budget-manager.mdx
@@ -0,0 +1,9 @@
+---
+title: Budget Manager
+---
+
+The `max_budget` property sets the maximum budget limit for the session in USD.
+
+```python
+interpreter.max_budget = 0.01 # 1 cent
+```
\ No newline at end of file
diff --git a/archive/classic_docs/usage/python/conversation-history.mdx b/archive/classic_docs/usage/python/conversation-history.mdx
new file mode 100644
index 0000000000..66b072515a
--- /dev/null
+++ b/archive/classic_docs/usage/python/conversation-history.mdx
@@ -0,0 +1,20 @@
+---
+title: Conversation History
+---
+
+Conversations will be saved in your application directory. **This is true for python and for the terminal interface.**
+
+The command below, when run in your terminal, will show you which folder they're being saved in (use your arrow keys to move down and press enter over `> Open Folder`):
+
+```shell
+interpreter --conversations
+```
+
+You can turn off conversation history for a particular conversation:
+
+```python
+from interpreter import interpreter
+
+interpreter.conversation_history = False
+interpreter.chat() # Conversation history will not be saved
+```
\ No newline at end of file
diff --git a/archive/classic_docs/usage/python/magic-commands.mdx b/archive/classic_docs/usage/python/magic-commands.mdx
new file mode 100644
index 0000000000..8680f3b780
--- /dev/null
+++ b/archive/classic_docs/usage/python/magic-commands.mdx
@@ -0,0 +1,17 @@
+---
+title: Magic Commands
+---
+
+If you run an interactive chat in python, you can use *magic commands* built for terminal usage:
+
+```python
+interpreter.chat()
+```
+
+The following magic commands will work:
+
+- `%verbose [true/false]`: Toggle verbose mode. Without arguments or with `true` it enters verbose mode. With `false` it exits verbose mode.
+- `%reset`: Resets the current session's conversation.
+- `%undo`: Removes the previous user message and the AI's response from the message history.
+- `%tokens [prompt]`: (Experimental) Calculate the tokens that will be sent with the next prompt as context and estimate their cost. Optionally calculate the tokens and estimated cost of a prompt if one is provided. Relies on LiteLLM's `cost_per_token()` method for estimated costs.
+- `%help`: Show the help message.
\ No newline at end of file
diff --git a/archive/classic_docs/usage/python/multiple-instances.mdx b/archive/classic_docs/usage/python/multiple-instances.mdx
new file mode 100644
index 0000000000..b19bb11425
--- /dev/null
+++ b/archive/classic_docs/usage/python/multiple-instances.mdx
@@ -0,0 +1,33 @@
+To create multiple instances, use the base class, `OpenInterpreter`:
+
+```python
+from interpreter import OpenInterpreter
+
+agent_1 = OpenInterpreter()
+agent_1.system_message = "This is a separate instance."
+
+agent_2 = OpenInterpreter()
+agent_2.system_message = "This is yet another instance."
+```
+
+For fun, you could make these instances talk to each other:
+
+```python
+def swap_roles(messages):
+ for message in messages:
+ if message['role'] == 'user':
+ message['role'] = 'assistant'
+ elif message['role'] == 'assistant':
+ message['role'] = 'user'
+ return messages
+
+agents = [agent_1, agent_2]
+
+# Kick off the conversation
+messages = [{"role": "user", "type": "message", "content": "Hello!"}]
+
+while True:
+ for agent in agents:
+ messages = agent.chat(messages)
+ messages = swap_roles(messages)
+```
diff --git a/archive/classic_docs/usage/python/settings.mdx b/archive/classic_docs/usage/python/settings.mdx
new file mode 100644
index 0000000000..9ca6200b6c
--- /dev/null
+++ b/archive/classic_docs/usage/python/settings.mdx
@@ -0,0 +1,11 @@
+---
+title: Settings
+---
+
+Default settings will be inherited from a profile in your application directory. **This is true for python and for the terminal interface.**
+
+To open the file, run:
+
+```bash
+interpreter --profiles
+```
diff --git a/archive/classic_docs/usage/terminal/arguments.mdx b/archive/classic_docs/usage/terminal/arguments.mdx
new file mode 100644
index 0000000000..b6c39d54d2
--- /dev/null
+++ b/archive/classic_docs/usage/terminal/arguments.mdx
@@ -0,0 +1,440 @@
+---
+title: Arguments
+---
+
+**[Modes](/docs/usage/terminal/arguments#modes)**
+
+`--vision`, `--os`.
+
+**[Model Settings](/docs/usage/terminal/arguments#model-settings)**
+
+`--model`, `--fast`, `--local`, `--temperature`, `--context_window`, `--max_tokens`, `--max_output`, `--api_base`, `--api_key`, `--api_version`, `--llm_supports_functions`, `--llm_supports_vision`.
+
+**[Configuration](/docs/usage/terminal/arguments#Configuration)**
+
+`--profiles`, `--profile`, `--custom_instructions`, `--system_message`.
+
+**[Options](/docs/usage/terminal/arguments#options)**
+
+`--safe_mode`, `--auto_run`, `--loop`, `--verbose`, `--max_budget`, `--speak_messages`, `--multi_line`.
+
+**[Other](/docs/usage/terminal/arguments#other)**
+
+`--version`, `--help`.
+
+---
+
+## Modes
+
+#### `--vision` or `-vi`
+
+Enables vision mode for multimodal models. Defaults to GPT-4-turbo.
+
+
+```bash Terminal
+interpreter --vision
+```
+
+```yaml Config
+vision: true
+```
+
+
+
+#### `--os` or `-o`
+
+Enables OS mode for multimodal models. Defaults to GPT-4-turbo.
+
+
+
+ ```bash Terminal
+ interpreter --os
+ ```
+
+ ```yaml Config
+ os: true
+ ```
+
+
+
+---
+
+## Model Settings
+
+#### `--model` or `-m`
+
+Specifies which language model to use. Check out the [models](https://docs.openinterpreter.com/language-model-setup/introduction) section for a list of available models.
+
+
+
+```bash Terminal
+interpreter --model "gpt-3.5-turbo"
+```
+
+```yaml Config
+model: gpt-3.5-turbo
+```
+
+
+
+#### `--fast` or `-f`
+
+Sets the model to gpt-3.5-turbo.
+
+
+```bash Terminal
+interpreter --fast
+```
+
+```yaml Config
+fast: true
+```
+
+
+
+#### `--local` or `-l`
+
+Run the model locally. Check the [models page](/language-model-setup/introduction) for more information.
+
+
+
+```bash Terminal
+interpreter --local
+```
+
+```yaml Config
+local: true
+```
+
+
+
+#### `--temperature` or `-t`
+
+Sets the randomness level of the model's output.
+
+
+
+```bash Terminal
+interpreter --temperature 0.7
+```
+
+```yaml Config
+temperature: 0.7
+```
+
+
+
+#### `--context_window` or `-c`
+
+Manually set the context window size in tokens for the model.
+
+
+
+```bash Terminal
+interpreter --context_window 16000
+```
+
+```yaml Config
+context_window: 16000
+```
+
+
+
+#### `--max_tokens` or `-x`
+
+Sets the maximum number of tokens that the model can generate in a single response.
+
+
+
+```bash Terminal
+interpreter --max_tokens 100
+```
+
+```yaml Config
+max_tokens: 100
+```
+
+
+
+#### `--max_output` or `-xo`
+
+Set the maximum number of characters for code outputs.
+
+
+```bash Terminal
+interpreter --max_output 1000
+```
+
+```yaml Config
+max_output: 1000
+```
+
+
+#### `--api_base` or `-ab`
+
+If you are using a custom API, specify its base URL with this argument.
+
+
+
+```bash Terminal
+interpreter --api_base "https://api.example.com"
+```
+
+```yaml Config
+api_base: https://api.example.com
+```
+
+
+
+#### `--api_key` or `-ak`
+
+Set your API key for authentication when making API calls.
+
+
+
+```bash Terminal
+interpreter --api_key "your_api_key_here"
+```
+
+```yaml Config
+api_key: your_api_key_here
+```
+
+
+
+#### `--api_version` or `-av`
+
+Optionally set the API version to use with your selected model. (This will override environment variables)
+
+
+```bash Terminal
+interpreter --api_version 2.0.2
+```
+
+```yaml Config
+api_version: 2.0.2
+```
+
+
+#### `--llm_supports_functions` or `-lsf`
+
+Inform Open Interpreter that the language model you're using supports function calling.
+
+
+```bash Terminal
+interpreter --llm_supports_functions
+```
+
+```yaml Config
+llm_supports_functions: true
+```
+
+
+#### `--no-llm_supports_functions`
+
+Inform Open Interpreter that the language model you're using does not support function calling.
+
+
+```bash Terminal
+interpreter --no-llm_supports_functions
+```
+
+
+#### `--llm_supports_vision` or `-lsv`
+
+Inform Open Interpreter that the language model you're using supports vision.
+
+
+```bash Terminal
+interpreter --llm_supports_vision
+```
+
+```yaml Config
+llm_supports_vision: true
+```
+
+
+
+---
+
+## Configuration
+
+#### `--profiles`
+
+Opens the directory containing all profiles. They can be edited in your default editor.
+
+
+```bash Terminal
+interpreter --profiles
+```
+
+
+
+#### `--profile` or `-p`
+
+Optionally set a profile to use.
+
+
+```bash Terminal
+interpreter --profile "default.yaml"
+```
+
+
+
+#### `--custom_instructions` or `-ci`
+
+Appends custom instructions to the system message. This is useful for adding information about your system, preferred languages, etc.
+
+
+```bash Terminal
+interpreter --custom_instructions "This is a custom instruction."
+```
+
+```yaml Config
+custom_instructions: "This is a custom instruction."
+```
+
+
+
+#### `--system_message` or `-s`
+
+We don't recommend modifying the system message, as doing so opts you out of future updates to the system message. Use `--custom_instructions` instead, to add relevant information to the system message. If you must modify the system message, you can do so by using this argument, or by opening the profile using `--profiles`.
+
+
+```bash Terminal
+interpreter --system_message "You are Open Interpreter..."
+```
+
+```yaml Config
+system_message: "You are Open Interpreter..."
+```
+
+## Options
+
+#### `--safe_mode`
+
+Enable or disable experimental safety mechanisms like code scanning. Valid options are `off`, `ask`, and `auto`.
+
+
+
+```bash Terminal
+interpreter --safe_mode ask
+```
+
+```yaml Config
+safe_mode: ask
+```
+
+
+
+#### `--auto_run` or `-y`
+
+Automatically run the interpreter without requiring user confirmation.
+
+
+
+```bash Terminal
+interpreter --auto_run
+```
+
+```yaml Config
+auto_run: true
+```
+
+
+
+#### `--loop`
+
+Runs Open Interpreter in a loop, requiring it to admit to completing or failing every task.
+
+
+```bash Terminal
+interpreter --loop
+```
+
+```yaml Config
+loop: true
+```
+
+
+
+#### `--verbose` or `-v`
+
+Run the interpreter in verbose mode. Debug information will be printed at each step to help diagnose issues.
+
+
+
+```bash Terminal
+interpreter --verbose
+```
+
+```yaml Config
+verbose: true
+```
+
+
+
+#### `--max_budget` or `-b`
+
+Sets the maximum budget limit for the session in USD.
+
+
+
+```bash Terminal
+interpreter --max_budget 0.01
+```
+
+```yaml Config
+max_budget: 0.01
+```
+
+
+
+#### `--speak_messages` or `-sm`
+
+(Mac Only) Speak messages out loud using the system's text-to-speech engine.
+
+
+```bash Terminal
+interpreter --speak_messages
+```
+
+```yaml Config
+speak_messages: true
+```
+
+
+
+#### `--multi_line` or `-ml`
+
+Enable multi-line inputs starting and ending with ` ``` `.
+
+
+```bash Terminal
+interpreter --multi_line
+```
+
+```yaml Config
+multi_line: true
+```
+
+
+
+---
+
+## Other
+
+#### `--version`
+
+Get the current installed version number of Open Interpreter.
+
+```bash Terminal
+interpreter --version
+```
+
+#### `--help` or `-h`
+
+Display all available terminal arguments.
+
+
+```bash Terminal
+interpreter --help
+```
+
+
diff --git a/archive/classic_docs/usage/terminal/budget-manager.mdx b/archive/classic_docs/usage/terminal/budget-manager.mdx
new file mode 100644
index 0000000000..453e05547b
--- /dev/null
+++ b/archive/classic_docs/usage/terminal/budget-manager.mdx
@@ -0,0 +1,8 @@
+---
+title: Budget Manager
+---
+
+You can set a maximum budget per session:
+```bash
+interpreter --max_budget 0.01
+```
\ No newline at end of file
diff --git a/archive/classic_docs/usage/terminal/magic-commands.mdx b/archive/classic_docs/usage/terminal/magic-commands.mdx
new file mode 100644
index 0000000000..98a7fd7b16
--- /dev/null
+++ b/archive/classic_docs/usage/terminal/magic-commands.mdx
@@ -0,0 +1,16 @@
+---
+title: Magic Commands
+---
+
+Magic commands can be used to control the interpreter's behavior in interactive mode:
+
+- `%% [commands]`: Run commands in system shell.
+- `%verbose [true/false]`: Toggle verbose mode. Without arguments or with 'true', it enters verbose mode. With 'false', it exits verbose mode.
+- `%reset`: Resets the current session's conversation.
+- `%undo`: Removes the previous user message and the AI's response from the message history.
+- `%save_message [path]`: Saves messages to a specified JSON path. If no path is provided, it defaults to 'messages.json'.
+- `%load_message [path]`: Loads messages from a specified JSON path. If no path is provided, it defaults to 'messages.json'.
+- `%tokens [prompt]`: EXPERIMENTAL: Calculate the tokens used by the next request based on the current conversation's messages and estimate the cost of that request; optionally provide a prompt to also calculate the tokens used by that prompt and the total amount of tokens that will be sent with the next request.
+- `%info`: Show system and interpreter information.
+- `%help`: Show this help message.
+- `%markdown [path]`: Export the conversation to a specified Markdown path. If no path is provided, it will be saved to the Downloads folder with a generated conversation name.
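+
+For example, to snapshot a conversation and export it, you might type the following at the interactive prompt (the filename is illustrative):
+
+```
+%save_message my_chat.json
+%markdown
+%info
+```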
diff --git a/archive/classic_docs/usage/terminal/settings.mdx b/archive/classic_docs/usage/terminal/settings.mdx
new file mode 100644
index 0000000000..61d0ac3cb6
--- /dev/null
+++ b/archive/classic_docs/usage/terminal/settings.mdx
@@ -0,0 +1,26 @@
+---
+title: Settings
+---
+
+Default settings can be edited via a profile. To open the file, run:
+
+```bash
+interpreter --profiles
+```
+
+| Key | Value |
+| ------------------------ | -------------------------------------------------------- |
+| `llm_model` | String ["openai/gpt-4", "openai/local", "azure/gpt-3.5"] |
+| `llm_temperature` | Float [0.0 -> 1.0] |
+| `llm_supports_vision` | Boolean [True/False] |
+| `llm_supports_functions` | Boolean [True/False] |
+| `llm_context_window` | Integer [3000] |
+| `llm_max_tokens` | Integer [3000] |
+| `llm_api_base` | String ["http://ip_address:port", "https://openai.com"] |
+| `llm_api_key` | String ["sk-Your-Key"] |
+| `llm_api_version` | String ["version-number"] |
+| `llm_max_budget`         | Float [0.01] (in USD)                                     |
+| `offline` | Boolean [True/False] |
+| `vision` | Boolean [True/False] |
+| `auto_run` | Boolean [True/False] |
+| `verbose` | Boolean [True/False] |
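+
+These keys can also typically be set programmatically. The sketch below assumes the mapping between profile keys and Python attributes suggested elsewhere in the codebase (e.g. `interpreter.llm.model`, `interpreter.llm.context_window`); treat the exact attribute names as illustrative rather than authoritative:
+
+```python
+from interpreter import interpreter
+
+# LLM settings (mirroring llm_model, llm_temperature, llm_context_window, llm_max_tokens)
+interpreter.llm.model = "openai/gpt-4"
+interpreter.llm.temperature = 0.0
+interpreter.llm.context_window = 3000
+interpreter.llm.max_tokens = 3000
+
+# Interpreter behavior (mirroring offline, auto_run, verbose)
+interpreter.offline = False
+interpreter.auto_run = False
+interpreter.verbose = False
+
+interpreter.chat("Print hello world")
+```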
diff --git a/archive/classic_docs/usage/terminal/vision.mdx b/archive/classic_docs/usage/terminal/vision.mdx
new file mode 100644
index 0000000000..b136381d72
--- /dev/null
+++ b/archive/classic_docs/usage/terminal/vision.mdx
@@ -0,0 +1,11 @@
+---
+title: Vision
+---
+
+To use vision (highly experimental), run the following command:
+
+```bash
+interpreter --vision
+```
+
+If a file path to an image is found in your input, it will be loaded into the vision model (`gpt-4o` for now).
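+
+The same behavior can be approximated from Python. This is a hedged sketch that assumes the `llm_supports_vision` profile key maps to `interpreter.llm.supports_vision` and that the chosen model accepts images:
+
+```python
+from interpreter import interpreter
+
+interpreter.llm.model = "gpt-4o"        # the vision model mentioned above
+interpreter.llm.supports_vision = True  # assumed mapping of the llm_supports_vision key
+
+# "./screenshot.png" is a hypothetical path used purely for illustration
+interpreter.chat("Describe the chart in ./screenshot.png")
+```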
diff --git a/examples/Dockerfile b/archive/classic_examples/Dockerfile
similarity index 100%
rename from examples/Dockerfile
rename to archive/classic_examples/Dockerfile
diff --git a/examples/JARVIS.ipynb b/archive/classic_examples/JARVIS.ipynb
similarity index 100%
rename from examples/JARVIS.ipynb
rename to archive/classic_examples/JARVIS.ipynb
diff --git a/examples/Open_Interpreter_Demo.ipynb b/archive/classic_examples/Open_Interpreter_Demo.ipynb
similarity index 100%
rename from examples/Open_Interpreter_Demo.ipynb
rename to archive/classic_examples/Open_Interpreter_Demo.ipynb
diff --git a/examples/README.md b/archive/classic_examples/README.md
similarity index 100%
rename from examples/README.md
rename to archive/classic_examples/README.md
diff --git a/examples/custom_tool.ipynb b/archive/classic_examples/custom_tool.ipynb
similarity index 100%
rename from examples/custom_tool.ipynb
rename to archive/classic_examples/custom_tool.ipynb
diff --git a/examples/interactive_quickstart.py b/archive/classic_examples/interactive_quickstart.py
similarity index 100%
rename from examples/interactive_quickstart.py
rename to archive/classic_examples/interactive_quickstart.py
diff --git a/examples/jan_computer_control.ipynb b/archive/classic_examples/jan_computer_control.ipynb
similarity index 100%
rename from examples/jan_computer_control.ipynb
rename to archive/classic_examples/jan_computer_control.ipynb
diff --git a/examples/local3.ipynb b/archive/classic_examples/local3.ipynb
similarity index 100%
rename from examples/local3.ipynb
rename to archive/classic_examples/local3.ipynb
diff --git a/examples/local_server.ipynb b/archive/classic_examples/local_server.ipynb
similarity index 100%
rename from examples/local_server.ipynb
rename to archive/classic_examples/local_server.ipynb
diff --git a/examples/organize_photos.ipynb b/archive/classic_examples/organize_photos.ipynb
similarity index 100%
rename from examples/organize_photos.ipynb
rename to archive/classic_examples/organize_photos.ipynb
diff --git a/examples/screenpipe.ipynb b/archive/classic_examples/screenpipe.ipynb
similarity index 100%
rename from examples/screenpipe.ipynb
rename to archive/classic_examples/screenpipe.ipynb
diff --git a/examples/talk_to_your_database.ipynb b/archive/classic_examples/talk_to_your_database.ipynb
similarity index 100%
rename from examples/talk_to_your_database.ipynb
rename to archive/classic_examples/talk_to_your_database.ipynb
diff --git a/archive/classic_interpreter/__init__.py b/archive/classic_interpreter/__init__.py
new file mode 100644
index 0000000000..7b9b7bd559
--- /dev/null
+++ b/archive/classic_interpreter/__init__.py
@@ -0,0 +1,69 @@
+import sys
+
+if "--os" in sys.argv:
+ from rich import print as rich_print
+ from rich.markdown import Markdown
+ from rich.rule import Rule
+
+ def print_markdown(message):
+ """
+ Display markdown message. Works with multiline strings with lots of indentation.
+ Will automatically make single line > tags beautiful.
+ """
+
+ for line in message.split("\n"):
+ line = line.strip()
+ if line == "":
+ print("")
+ elif line == "---":
+ rich_print(Rule(style="white"))
+ else:
+ try:
+ rich_print(Markdown(line))
+ except UnicodeEncodeError as e:
+ # Replace the problematic character or handle the error as needed
+ print("Error displaying line:", line)
+
+ if "\n" not in message and message.startswith(">"):
+ # Aesthetic choice. For these tags, they need a space below them
+ print("")
+
+ import pkg_resources
+ import requests
+ from packaging import version
+
+ def check_for_update():
+ # Fetch the latest version from the PyPI API
+ response = requests.get(f"https://pypi.org/pypi/open-interpreter/json")
+ latest_version = response.json()["info"]["version"]
+
+ # Get the current version using pkg_resources
+ current_version = pkg_resources.get_distribution("open-interpreter").version
+
+ return version.parse(latest_version) > version.parse(current_version)
+
+ if check_for_update():
+ print_markdown(
+ "> **A new version of Open Interpreter is available.**\n>Please run: `pip install --upgrade open-interpreter`\n\n---"
+ )
+
+ if "--voice" in sys.argv:
+ print("Coming soon...")
+ from ..computer_use.loop import run_async_main
+
+ run_async_main()
+ exit()
+
+from .core.async_core import AsyncInterpreter
+from .core.computer.terminal.base_language import BaseLanguage
+from .core.core import OpenInterpreter
+
+interpreter = OpenInterpreter()
+computer = interpreter.computer
+
+# ____ ____ __ __
+# / __ \____ ___ ____ / _/___ / /____ _________ ________ / /____ _____
+# / / / / __ \/ _ \/ __ \ / // __ \/ __/ _ \/ ___/ __ \/ ___/ _ \/ __/ _ \/ ___/
+# / /_/ / /_/ / __/ / / / _/ // / / / /_/ __/ / / /_/ / / / __/ /_/ __/ /
+# \____/ .___/\___/_/ /_/ /___/_/ /_/\__/\___/_/ / .___/_/ \___/\__/\___/_/
+# /_/ /_/
diff --git a/interpreter/core/computer/ai/__init__.py b/archive/classic_interpreter/core/__init__.py
similarity index 100%
rename from interpreter/core/computer/ai/__init__.py
rename to archive/classic_interpreter/core/__init__.py
diff --git a/interpreter/core/archived_server_1.py b/archive/classic_interpreter/core/archived_server_1.py
similarity index 100%
rename from interpreter/core/archived_server_1.py
rename to archive/classic_interpreter/core/archived_server_1.py
diff --git a/interpreter/core/archived_server_2.py b/archive/classic_interpreter/core/archived_server_2.py
similarity index 100%
rename from interpreter/core/archived_server_2.py
rename to archive/classic_interpreter/core/archived_server_2.py
diff --git a/interpreter/core/async_core.py b/archive/classic_interpreter/core/async_core.py
similarity index 100%
rename from interpreter/core/async_core.py
rename to archive/classic_interpreter/core/async_core.py
diff --git a/interpreter/core/computer/browser/__init__.py b/archive/classic_interpreter/core/computer/__init__.py
similarity index 100%
rename from interpreter/core/computer/browser/__init__.py
rename to archive/classic_interpreter/core/computer/__init__.py
diff --git a/interpreter/core/computer/calendar/__init__.py b/archive/classic_interpreter/core/computer/ai/__init__.py
similarity index 100%
rename from interpreter/core/computer/calendar/__init__.py
rename to archive/classic_interpreter/core/computer/ai/__init__.py
diff --git a/interpreter/core/computer/ai/ai.py b/archive/classic_interpreter/core/computer/ai/ai.py
similarity index 100%
rename from interpreter/core/computer/ai/ai.py
rename to archive/classic_interpreter/core/computer/ai/ai.py
diff --git a/interpreter/core/computer/clipboard/__init__.py b/archive/classic_interpreter/core/computer/browser/__init__.py
similarity index 100%
rename from interpreter/core/computer/clipboard/__init__.py
rename to archive/classic_interpreter/core/computer/browser/__init__.py
diff --git a/interpreter/core/computer/browser/browser.py b/archive/classic_interpreter/core/computer/browser/browser.py
similarity index 100%
rename from interpreter/core/computer/browser/browser.py
rename to archive/classic_interpreter/core/computer/browser/browser.py
diff --git a/interpreter/core/computer/browser/browser_next.py b/archive/classic_interpreter/core/computer/browser/browser_next.py
similarity index 100%
rename from interpreter/core/computer/browser/browser_next.py
rename to archive/classic_interpreter/core/computer/browser/browser_next.py
diff --git a/interpreter/core/computer/contacts/__init__.py b/archive/classic_interpreter/core/computer/calendar/__init__.py
similarity index 100%
rename from interpreter/core/computer/contacts/__init__.py
rename to archive/classic_interpreter/core/computer/calendar/__init__.py
diff --git a/interpreter/core/computer/calendar/calendar.py b/archive/classic_interpreter/core/computer/calendar/calendar.py
similarity index 100%
rename from interpreter/core/computer/calendar/calendar.py
rename to archive/classic_interpreter/core/computer/calendar/calendar.py
diff --git a/interpreter/core/computer/display/__init__.py b/archive/classic_interpreter/core/computer/clipboard/__init__.py
similarity index 100%
rename from interpreter/core/computer/display/__init__.py
rename to archive/classic_interpreter/core/computer/clipboard/__init__.py
diff --git a/interpreter/core/computer/clipboard/clipboard.py b/archive/classic_interpreter/core/computer/clipboard/clipboard.py
similarity index 100%
rename from interpreter/core/computer/clipboard/clipboard.py
rename to archive/classic_interpreter/core/computer/clipboard/clipboard.py
diff --git a/interpreter/core/computer/computer.py b/archive/classic_interpreter/core/computer/computer.py
similarity index 100%
rename from interpreter/core/computer/computer.py
rename to archive/classic_interpreter/core/computer/computer.py
diff --git a/interpreter/core/computer/docs/__init__.py b/archive/classic_interpreter/core/computer/contacts/__init__.py
similarity index 100%
rename from interpreter/core/computer/docs/__init__.py
rename to archive/classic_interpreter/core/computer/contacts/__init__.py
diff --git a/interpreter/core/computer/contacts/contacts.py b/archive/classic_interpreter/core/computer/contacts/contacts.py
similarity index 100%
rename from interpreter/core/computer/contacts/contacts.py
rename to archive/classic_interpreter/core/computer/contacts/contacts.py
diff --git a/interpreter/core/computer/files/__init__.py b/archive/classic_interpreter/core/computer/display/__init__.py
similarity index 100%
rename from interpreter/core/computer/files/__init__.py
rename to archive/classic_interpreter/core/computer/display/__init__.py
diff --git a/interpreter/core/computer/display/display.py b/archive/classic_interpreter/core/computer/display/display.py
similarity index 100%
rename from interpreter/core/computer/display/display.py
rename to archive/classic_interpreter/core/computer/display/display.py
diff --git a/interpreter/core/computer/display/point/point.py b/archive/classic_interpreter/core/computer/display/point/point.py
similarity index 100%
rename from interpreter/core/computer/display/point/point.py
rename to archive/classic_interpreter/core/computer/display/point/point.py
diff --git a/interpreter/core/computer/keyboard/__init__.py b/archive/classic_interpreter/core/computer/docs/__init__.py
similarity index 100%
rename from interpreter/core/computer/keyboard/__init__.py
rename to archive/classic_interpreter/core/computer/docs/__init__.py
diff --git a/interpreter/core/computer/docs/docs.py b/archive/classic_interpreter/core/computer/docs/docs.py
similarity index 100%
rename from interpreter/core/computer/docs/docs.py
rename to archive/classic_interpreter/core/computer/docs/docs.py
diff --git a/interpreter/core/computer/mail/__init__.py b/archive/classic_interpreter/core/computer/files/__init__.py
similarity index 100%
rename from interpreter/core/computer/mail/__init__.py
rename to archive/classic_interpreter/core/computer/files/__init__.py
diff --git a/interpreter/core/computer/files/files.py b/archive/classic_interpreter/core/computer/files/files.py
similarity index 100%
rename from interpreter/core/computer/files/files.py
rename to archive/classic_interpreter/core/computer/files/files.py
diff --git a/interpreter/core/computer/mouse/__init__.py b/archive/classic_interpreter/core/computer/keyboard/__init__.py
similarity index 100%
rename from interpreter/core/computer/mouse/__init__.py
rename to archive/classic_interpreter/core/computer/keyboard/__init__.py
diff --git a/interpreter/core/computer/keyboard/keyboard.py b/archive/classic_interpreter/core/computer/keyboard/keyboard.py
similarity index 100%
rename from interpreter/core/computer/keyboard/keyboard.py
rename to archive/classic_interpreter/core/computer/keyboard/keyboard.py
diff --git a/interpreter/core/computer/os/__init__.py b/archive/classic_interpreter/core/computer/mail/__init__.py
similarity index 100%
rename from interpreter/core/computer/os/__init__.py
rename to archive/classic_interpreter/core/computer/mail/__init__.py
diff --git a/interpreter/core/computer/mail/mail.py b/archive/classic_interpreter/core/computer/mail/mail.py
similarity index 100%
rename from interpreter/core/computer/mail/mail.py
rename to archive/classic_interpreter/core/computer/mail/mail.py
diff --git a/interpreter/core/computer/sms/__init__.py b/archive/classic_interpreter/core/computer/mouse/__init__.py
similarity index 100%
rename from interpreter/core/computer/sms/__init__.py
rename to archive/classic_interpreter/core/computer/mouse/__init__.py
diff --git a/interpreter/core/computer/mouse/mouse.py b/archive/classic_interpreter/core/computer/mouse/mouse.py
similarity index 100%
rename from interpreter/core/computer/mouse/mouse.py
rename to archive/classic_interpreter/core/computer/mouse/mouse.py
diff --git a/interpreter/core/computer/terminal/__init__.py b/archive/classic_interpreter/core/computer/os/__init__.py
similarity index 100%
rename from interpreter/core/computer/terminal/__init__.py
rename to archive/classic_interpreter/core/computer/os/__init__.py
diff --git a/interpreter/core/computer/os/os.py b/archive/classic_interpreter/core/computer/os/os.py
similarity index 100%
rename from interpreter/core/computer/os/os.py
rename to archive/classic_interpreter/core/computer/os/os.py
diff --git a/interpreter/core/computer/skills/skills.py b/archive/classic_interpreter/core/computer/skills/skills.py
similarity index 100%
rename from interpreter/core/computer/skills/skills.py
rename to archive/classic_interpreter/core/computer/skills/skills.py
diff --git a/interpreter/core/computer/terminal/languages/__init__.py b/archive/classic_interpreter/core/computer/sms/__init__.py
similarity index 100%
rename from interpreter/core/computer/terminal/languages/__init__.py
rename to archive/classic_interpreter/core/computer/sms/__init__.py
diff --git a/interpreter/core/computer/sms/sms.py b/archive/classic_interpreter/core/computer/sms/sms.py
similarity index 100%
rename from interpreter/core/computer/sms/sms.py
rename to archive/classic_interpreter/core/computer/sms/sms.py
diff --git a/interpreter/core/computer/vision/__init__.py b/archive/classic_interpreter/core/computer/terminal/__init__.py
similarity index 100%
rename from interpreter/core/computer/vision/__init__.py
rename to archive/classic_interpreter/core/computer/terminal/__init__.py
diff --git a/interpreter/core/computer/terminal/base_language.py b/archive/classic_interpreter/core/computer/terminal/base_language.py
similarity index 100%
rename from interpreter/core/computer/terminal/base_language.py
rename to archive/classic_interpreter/core/computer/terminal/base_language.py
diff --git a/interpreter/core/llm/__init__.py b/archive/classic_interpreter/core/computer/terminal/languages/__init__.py
similarity index 100%
rename from interpreter/core/llm/__init__.py
rename to archive/classic_interpreter/core/computer/terminal/languages/__init__.py
diff --git a/interpreter/core/computer/terminal/languages/applescript.py b/archive/classic_interpreter/core/computer/terminal/languages/applescript.py
similarity index 100%
rename from interpreter/core/computer/terminal/languages/applescript.py
rename to archive/classic_interpreter/core/computer/terminal/languages/applescript.py
diff --git a/interpreter/core/computer/terminal/languages/html.py b/archive/classic_interpreter/core/computer/terminal/languages/html.py
similarity index 100%
rename from interpreter/core/computer/terminal/languages/html.py
rename to archive/classic_interpreter/core/computer/terminal/languages/html.py
diff --git a/interpreter/core/computer/terminal/languages/java.py b/archive/classic_interpreter/core/computer/terminal/languages/java.py
similarity index 100%
rename from interpreter/core/computer/terminal/languages/java.py
rename to archive/classic_interpreter/core/computer/terminal/languages/java.py
diff --git a/interpreter/core/computer/terminal/languages/javascript.py b/archive/classic_interpreter/core/computer/terminal/languages/javascript.py
similarity index 100%
rename from interpreter/core/computer/terminal/languages/javascript.py
rename to archive/classic_interpreter/core/computer/terminal/languages/javascript.py
diff --git a/interpreter/core/computer/terminal/languages/jupyter_language.py b/archive/classic_interpreter/core/computer/terminal/languages/jupyter_language.py
similarity index 100%
rename from interpreter/core/computer/terminal/languages/jupyter_language.py
rename to archive/classic_interpreter/core/computer/terminal/languages/jupyter_language.py
diff --git a/interpreter/core/computer/terminal/languages/powershell.py b/archive/classic_interpreter/core/computer/terminal/languages/powershell.py
similarity index 100%
rename from interpreter/core/computer/terminal/languages/powershell.py
rename to archive/classic_interpreter/core/computer/terminal/languages/powershell.py
diff --git a/interpreter/core/computer/terminal/languages/python.py b/archive/classic_interpreter/core/computer/terminal/languages/python.py
similarity index 100%
rename from interpreter/core/computer/terminal/languages/python.py
rename to archive/classic_interpreter/core/computer/terminal/languages/python.py
diff --git a/interpreter/core/computer/terminal/languages/r.py b/archive/classic_interpreter/core/computer/terminal/languages/r.py
similarity index 100%
rename from interpreter/core/computer/terminal/languages/r.py
rename to archive/classic_interpreter/core/computer/terminal/languages/r.py
diff --git a/interpreter/core/computer/terminal/languages/react.py b/archive/classic_interpreter/core/computer/terminal/languages/react.py
similarity index 100%
rename from interpreter/core/computer/terminal/languages/react.py
rename to archive/classic_interpreter/core/computer/terminal/languages/react.py
diff --git a/interpreter/core/computer/terminal/languages/ruby.py b/archive/classic_interpreter/core/computer/terminal/languages/ruby.py
similarity index 100%
rename from interpreter/core/computer/terminal/languages/ruby.py
rename to archive/classic_interpreter/core/computer/terminal/languages/ruby.py
diff --git a/interpreter/core/computer/terminal/languages/shell.py b/archive/classic_interpreter/core/computer/terminal/languages/shell.py
similarity index 100%
rename from interpreter/core/computer/terminal/languages/shell.py
rename to archive/classic_interpreter/core/computer/terminal/languages/shell.py
diff --git a/interpreter/core/computer/terminal/languages/subprocess_language.py b/archive/classic_interpreter/core/computer/terminal/languages/subprocess_language.py
similarity index 100%
rename from interpreter/core/computer/terminal/languages/subprocess_language.py
rename to archive/classic_interpreter/core/computer/terminal/languages/subprocess_language.py
diff --git a/interpreter/core/computer/terminal/terminal.py b/archive/classic_interpreter/core/computer/terminal/terminal.py
similarity index 100%
rename from interpreter/core/computer/terminal/terminal.py
rename to archive/classic_interpreter/core/computer/terminal/terminal.py
diff --git a/interpreter/core/computer/utils/computer_vision.py b/archive/classic_interpreter/core/computer/utils/computer_vision.py
similarity index 100%
rename from interpreter/core/computer/utils/computer_vision.py
rename to archive/classic_interpreter/core/computer/utils/computer_vision.py
diff --git a/interpreter/core/computer/utils/get_active_window.py b/archive/classic_interpreter/core/computer/utils/get_active_window.py
similarity index 100%
rename from interpreter/core/computer/utils/get_active_window.py
rename to archive/classic_interpreter/core/computer/utils/get_active_window.py
diff --git a/interpreter/core/computer/utils/html_to_png_base64.py b/archive/classic_interpreter/core/computer/utils/html_to_png_base64.py
similarity index 100%
rename from interpreter/core/computer/utils/html_to_png_base64.py
rename to archive/classic_interpreter/core/computer/utils/html_to_png_base64.py
diff --git a/interpreter/core/computer/utils/recipient_utils.py b/archive/classic_interpreter/core/computer/utils/recipient_utils.py
similarity index 100%
rename from interpreter/core/computer/utils/recipient_utils.py
rename to archive/classic_interpreter/core/computer/utils/recipient_utils.py
diff --git a/interpreter/core/computer/utils/run_applescript.py b/archive/classic_interpreter/core/computer/utils/run_applescript.py
similarity index 100%
rename from interpreter/core/computer/utils/run_applescript.py
rename to archive/classic_interpreter/core/computer/utils/run_applescript.py
diff --git a/interpreter/core/utils/__init__.py b/archive/classic_interpreter/core/computer/vision/__init__.py
similarity index 100%
rename from interpreter/core/utils/__init__.py
rename to archive/classic_interpreter/core/computer/vision/__init__.py
diff --git a/interpreter/core/computer/vision/vision.py b/archive/classic_interpreter/core/computer/vision/vision.py
similarity index 100%
rename from interpreter/core/computer/vision/vision.py
rename to archive/classic_interpreter/core/computer/vision/vision.py
diff --git a/interpreter/core/core.py b/archive/classic_interpreter/core/core.py
similarity index 100%
rename from interpreter/core/core.py
rename to archive/classic_interpreter/core/core.py
diff --git a/interpreter/core/default_system_message.py b/archive/classic_interpreter/core/default_system_message.py
similarity index 100%
rename from interpreter/core/default_system_message.py
rename to archive/classic_interpreter/core/default_system_message.py
diff --git a/interpreter/terminal_interface/__init__.py b/archive/classic_interpreter/core/llm/__init__.py
similarity index 100%
rename from interpreter/terminal_interface/__init__.py
rename to archive/classic_interpreter/core/llm/__init__.py
diff --git a/interpreter/core/llm/llm.py b/archive/classic_interpreter/core/llm/llm.py
similarity index 100%
rename from interpreter/core/llm/llm.py
rename to archive/classic_interpreter/core/llm/llm.py
diff --git a/interpreter/core/llm/run_function_calling_llm.py b/archive/classic_interpreter/core/llm/run_function_calling_llm.py
similarity index 100%
rename from interpreter/core/llm/run_function_calling_llm.py
rename to archive/classic_interpreter/core/llm/run_function_calling_llm.py
diff --git a/interpreter/core/llm/run_text_llm.py b/archive/classic_interpreter/core/llm/run_text_llm.py
similarity index 100%
rename from interpreter/core/llm/run_text_llm.py
rename to archive/classic_interpreter/core/llm/run_text_llm.py
diff --git a/interpreter/core/llm/run_tool_calling_llm.py b/archive/classic_interpreter/core/llm/run_tool_calling_llm.py
similarity index 100%
rename from interpreter/core/llm/run_tool_calling_llm.py
rename to archive/classic_interpreter/core/llm/run_tool_calling_llm.py
diff --git a/interpreter/core/llm/utils/convert_to_openai_messages.py b/archive/classic_interpreter/core/llm/utils/convert_to_openai_messages.py
similarity index 100%
rename from interpreter/core/llm/utils/convert_to_openai_messages.py
rename to archive/classic_interpreter/core/llm/utils/convert_to_openai_messages.py
diff --git a/interpreter/core/llm/utils/merge_deltas.py b/archive/classic_interpreter/core/llm/utils/merge_deltas.py
similarity index 100%
rename from interpreter/core/llm/utils/merge_deltas.py
rename to archive/classic_interpreter/core/llm/utils/merge_deltas.py
diff --git a/interpreter/core/llm/utils/parse_partial_json.py b/archive/classic_interpreter/core/llm/utils/parse_partial_json.py
similarity index 100%
rename from interpreter/core/llm/utils/parse_partial_json.py
rename to archive/classic_interpreter/core/llm/utils/parse_partial_json.py
diff --git a/interpreter/core/render_message.py b/archive/classic_interpreter/core/render_message.py
similarity index 100%
rename from interpreter/core/render_message.py
rename to archive/classic_interpreter/core/render_message.py
diff --git a/interpreter/core/respond.py b/archive/classic_interpreter/core/respond.py
similarity index 100%
rename from interpreter/core/respond.py
rename to archive/classic_interpreter/core/respond.py
diff --git a/interpreter_1/misc/__init__.py b/archive/classic_interpreter/core/utils/__init__.py
similarity index 100%
rename from interpreter_1/misc/__init__.py
rename to archive/classic_interpreter/core/utils/__init__.py
diff --git a/interpreter/core/utils/lazy_import.py b/archive/classic_interpreter/core/utils/lazy_import.py
similarity index 100%
rename from interpreter/core/utils/lazy_import.py
rename to archive/classic_interpreter/core/utils/lazy_import.py
diff --git a/interpreter/core/utils/scan_code.py b/archive/classic_interpreter/core/utils/scan_code.py
similarity index 100%
rename from interpreter/core/utils/scan_code.py
rename to archive/classic_interpreter/core/utils/scan_code.py
diff --git a/interpreter/core/utils/system_debug_info.py b/archive/classic_interpreter/core/utils/system_debug_info.py
similarity index 100%
rename from interpreter/core/utils/system_debug_info.py
rename to archive/classic_interpreter/core/utils/system_debug_info.py
diff --git a/interpreter/core/utils/telemetry.py b/archive/classic_interpreter/core/utils/telemetry.py
similarity index 100%
rename from interpreter/core/utils/telemetry.py
rename to archive/classic_interpreter/core/utils/telemetry.py
diff --git a/interpreter/core/utils/temporary_file.py b/archive/classic_interpreter/core/utils/temporary_file.py
similarity index 100%
rename from interpreter/core/utils/temporary_file.py
rename to archive/classic_interpreter/core/utils/temporary_file.py
diff --git a/interpreter/core/utils/truncate_output.py b/archive/classic_interpreter/core/utils/truncate_output.py
similarity index 100%
rename from interpreter/core/utils/truncate_output.py
rename to archive/classic_interpreter/core/utils/truncate_output.py
diff --git a/interpreter_1/ui/__init__.py b/archive/classic_interpreter/terminal_interface/__init__.py
similarity index 100%
rename from interpreter_1/ui/__init__.py
rename to archive/classic_interpreter/terminal_interface/__init__.py
diff --git a/interpreter/terminal_interface/components/base_block.py b/archive/classic_interpreter/terminal_interface/components/base_block.py
similarity index 100%
rename from interpreter/terminal_interface/components/base_block.py
rename to archive/classic_interpreter/terminal_interface/components/base_block.py
diff --git a/interpreter/terminal_interface/components/code_block.py b/archive/classic_interpreter/terminal_interface/components/code_block.py
similarity index 100%
rename from interpreter/terminal_interface/components/code_block.py
rename to archive/classic_interpreter/terminal_interface/components/code_block.py
diff --git a/interpreter/terminal_interface/components/message_block.py b/archive/classic_interpreter/terminal_interface/components/message_block.py
similarity index 100%
rename from interpreter/terminal_interface/components/message_block.py
rename to archive/classic_interpreter/terminal_interface/components/message_block.py
diff --git a/interpreter/terminal_interface/contributing_conversations.py b/archive/classic_interpreter/terminal_interface/contributing_conversations.py
similarity index 100%
rename from interpreter/terminal_interface/contributing_conversations.py
rename to archive/classic_interpreter/terminal_interface/contributing_conversations.py
diff --git a/interpreter/terminal_interface/conversation_navigator.py b/archive/classic_interpreter/terminal_interface/conversation_navigator.py
similarity index 100%
rename from interpreter/terminal_interface/conversation_navigator.py
rename to archive/classic_interpreter/terminal_interface/conversation_navigator.py
diff --git a/interpreter/terminal_interface/local_setup.py b/archive/classic_interpreter/terminal_interface/local_setup.py
similarity index 100%
rename from interpreter/terminal_interface/local_setup.py
rename to archive/classic_interpreter/terminal_interface/local_setup.py
diff --git a/interpreter/terminal_interface/magic_commands.py b/archive/classic_interpreter/terminal_interface/magic_commands.py
similarity index 100%
rename from interpreter/terminal_interface/magic_commands.py
rename to archive/classic_interpreter/terminal_interface/magic_commands.py
diff --git a/interpreter/terminal_interface/profiles/defaults/assistant.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/assistant.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/assistant.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/assistant.py
diff --git a/interpreter/terminal_interface/profiles/defaults/aws-docs.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/aws-docs.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/aws-docs.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/aws-docs.py
diff --git a/interpreter/terminal_interface/profiles/defaults/bedrock-anthropic.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/bedrock-anthropic.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/bedrock-anthropic.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/bedrock-anthropic.py
diff --git a/interpreter/terminal_interface/profiles/defaults/cerebras.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/cerebras.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/cerebras.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/cerebras.py
diff --git a/interpreter/terminal_interface/profiles/defaults/codestral-few-shot.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/codestral-few-shot.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/codestral-few-shot.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/codestral-few-shot.py
diff --git a/interpreter/terminal_interface/profiles/defaults/codestral-os.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/codestral-os.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/codestral-os.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/codestral-os.py
diff --git a/interpreter/terminal_interface/profiles/defaults/codestral-vision.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/codestral-vision.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/codestral-vision.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/codestral-vision.py
diff --git a/interpreter/terminal_interface/profiles/defaults/codestral.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/codestral.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/codestral.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/codestral.py
diff --git a/interpreter/terminal_interface/profiles/defaults/cortex-llama32.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/cortex-llama32.py
similarity index 99%
rename from interpreter/terminal_interface/profiles/defaults/cortex-llama32.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/cortex-llama32.py
index 48c237c61b..3435bdf1fd 100644
--- a/interpreter/terminal_interface/profiles/defaults/cortex-llama32.py
+++ b/archive/classic_interpreter/terminal_interface/profiles/defaults/cortex-llama32.py
@@ -11,7 +11,6 @@
from interpreter import interpreter
-
# Update the model to match t
interpreter.llm.model = "llama3.2:3b-gguf-q8-0"
interpreter.llm.context_window = 8192
diff --git a/interpreter/terminal_interface/profiles/defaults/default.yaml b/archive/classic_interpreter/terminal_interface/profiles/defaults/default.yaml
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/default.yaml
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/default.yaml
diff --git a/interpreter/terminal_interface/profiles/defaults/e2b.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/e2b.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/e2b.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/e2b.py
diff --git a/interpreter/terminal_interface/profiles/defaults/fast.yaml b/archive/classic_interpreter/terminal_interface/profiles/defaults/fast.yaml
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/fast.yaml
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/fast.yaml
diff --git a/interpreter/terminal_interface/profiles/defaults/gemini.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/gemini.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/gemini.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/gemini.py
diff --git a/interpreter/terminal_interface/profiles/defaults/gemma2.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/gemma2.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/gemma2.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/gemma2.py
diff --git a/interpreter/terminal_interface/profiles/defaults/groq.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/groq.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/groq.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/groq.py
diff --git a/interpreter/terminal_interface/profiles/defaults/haiku.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/haiku.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/haiku.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/haiku.py
diff --git a/interpreter/terminal_interface/profiles/defaults/llama3-os.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/llama3-os.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/llama3-os.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/llama3-os.py
diff --git a/interpreter/terminal_interface/profiles/defaults/llama3-vision.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/llama3-vision.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/llama3-vision.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/llama3-vision.py
diff --git a/interpreter/terminal_interface/profiles/defaults/llama3.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/llama3.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/llama3.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/llama3.py
diff --git a/interpreter/terminal_interface/profiles/defaults/llama31-database.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/llama31-database.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/llama31-database.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/llama31-database.py
diff --git a/interpreter/terminal_interface/profiles/defaults/local-assistant.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/local-assistant.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/local-assistant.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/local-assistant.py
diff --git a/interpreter/terminal_interface/profiles/defaults/local-os.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/local-os.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/local-os.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/local-os.py
diff --git a/interpreter/terminal_interface/profiles/defaults/local.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/local.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/local.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/local.py
diff --git a/interpreter/terminal_interface/profiles/defaults/obsidian.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/obsidian.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/obsidian.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/obsidian.py
diff --git a/interpreter/terminal_interface/profiles/defaults/os.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/os.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/os.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/os.py
diff --git a/interpreter/terminal_interface/profiles/defaults/qwen.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/qwen.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/qwen.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/qwen.py
diff --git a/interpreter/terminal_interface/profiles/defaults/screenpipe.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/screenpipe.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/screenpipe.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/screenpipe.py
diff --git a/interpreter/terminal_interface/profiles/defaults/snowpark.yml b/archive/classic_interpreter/terminal_interface/profiles/defaults/snowpark.yml
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/snowpark.yml
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/snowpark.yml
diff --git a/interpreter/terminal_interface/profiles/defaults/template_profile.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/template_profile.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/template_profile.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/template_profile.py
diff --git a/interpreter/terminal_interface/profiles/defaults/the01.py b/archive/classic_interpreter/terminal_interface/profiles/defaults/the01.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/the01.py
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/the01.py
diff --git a/interpreter/terminal_interface/profiles/defaults/vision.yaml b/archive/classic_interpreter/terminal_interface/profiles/defaults/vision.yaml
similarity index 100%
rename from interpreter/terminal_interface/profiles/defaults/vision.yaml
rename to archive/classic_interpreter/terminal_interface/profiles/defaults/vision.yaml
diff --git a/interpreter/terminal_interface/profiles/historical_profiles.py b/archive/classic_interpreter/terminal_interface/profiles/historical_profiles.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/historical_profiles.py
rename to archive/classic_interpreter/terminal_interface/profiles/historical_profiles.py
diff --git a/interpreter/terminal_interface/profiles/profiles.py b/archive/classic_interpreter/terminal_interface/profiles/profiles.py
similarity index 100%
rename from interpreter/terminal_interface/profiles/profiles.py
rename to archive/classic_interpreter/terminal_interface/profiles/profiles.py
diff --git a/interpreter/terminal_interface/render_past_conversation.py b/archive/classic_interpreter/terminal_interface/render_past_conversation.py
similarity index 100%
rename from interpreter/terminal_interface/render_past_conversation.py
rename to archive/classic_interpreter/terminal_interface/render_past_conversation.py
diff --git a/interpreter/terminal_interface/start_terminal_interface.py b/archive/classic_interpreter/terminal_interface/start_terminal_interface.py
similarity index 100%
rename from interpreter/terminal_interface/start_terminal_interface.py
rename to archive/classic_interpreter/terminal_interface/start_terminal_interface.py
diff --git a/interpreter/terminal_interface/terminal_interface.py b/archive/classic_interpreter/terminal_interface/terminal_interface.py
similarity index 100%
rename from interpreter/terminal_interface/terminal_interface.py
rename to archive/classic_interpreter/terminal_interface/terminal_interface.py
diff --git a/interpreter/terminal_interface/utils/check_for_package.py b/archive/classic_interpreter/terminal_interface/utils/check_for_package.py
similarity index 100%
rename from interpreter/terminal_interface/utils/check_for_package.py
rename to archive/classic_interpreter/terminal_interface/utils/check_for_package.py
diff --git a/interpreter/terminal_interface/utils/check_for_update.py b/archive/classic_interpreter/terminal_interface/utils/check_for_update.py
similarity index 100%
rename from interpreter/terminal_interface/utils/check_for_update.py
rename to archive/classic_interpreter/terminal_interface/utils/check_for_update.py
diff --git a/interpreter/terminal_interface/utils/cli_input.py b/archive/classic_interpreter/terminal_interface/utils/cli_input.py
similarity index 100%
rename from interpreter/terminal_interface/utils/cli_input.py
rename to archive/classic_interpreter/terminal_interface/utils/cli_input.py
diff --git a/interpreter/terminal_interface/utils/count_tokens.py b/archive/classic_interpreter/terminal_interface/utils/count_tokens.py
similarity index 100%
rename from interpreter/terminal_interface/utils/count_tokens.py
rename to archive/classic_interpreter/terminal_interface/utils/count_tokens.py
diff --git a/interpreter/terminal_interface/utils/display_markdown_message.py b/archive/classic_interpreter/terminal_interface/utils/display_markdown_message.py
similarity index 100%
rename from interpreter/terminal_interface/utils/display_markdown_message.py
rename to archive/classic_interpreter/terminal_interface/utils/display_markdown_message.py
diff --git a/interpreter/terminal_interface/utils/display_output.py b/archive/classic_interpreter/terminal_interface/utils/display_output.py
similarity index 100%
rename from interpreter/terminal_interface/utils/display_output.py
rename to archive/classic_interpreter/terminal_interface/utils/display_output.py
diff --git a/interpreter/terminal_interface/utils/export_to_markdown.py b/archive/classic_interpreter/terminal_interface/utils/export_to_markdown.py
similarity index 100%
rename from interpreter/terminal_interface/utils/export_to_markdown.py
rename to archive/classic_interpreter/terminal_interface/utils/export_to_markdown.py
diff --git a/interpreter/terminal_interface/utils/find_image_path.py b/archive/classic_interpreter/terminal_interface/utils/find_image_path.py
similarity index 100%
rename from interpreter/terminal_interface/utils/find_image_path.py
rename to archive/classic_interpreter/terminal_interface/utils/find_image_path.py
diff --git a/interpreter/terminal_interface/utils/get_conversations.py b/archive/classic_interpreter/terminal_interface/utils/get_conversations.py
similarity index 100%
rename from interpreter/terminal_interface/utils/get_conversations.py
rename to archive/classic_interpreter/terminal_interface/utils/get_conversations.py
diff --git a/interpreter/terminal_interface/utils/in_jupyter_notebook.py b/archive/classic_interpreter/terminal_interface/utils/in_jupyter_notebook.py
similarity index 100%
rename from interpreter/terminal_interface/utils/in_jupyter_notebook.py
rename to archive/classic_interpreter/terminal_interface/utils/in_jupyter_notebook.py
diff --git a/interpreter/terminal_interface/utils/local_storage_path.py b/archive/classic_interpreter/terminal_interface/utils/local_storage_path.py
similarity index 100%
rename from interpreter/terminal_interface/utils/local_storage_path.py
rename to archive/classic_interpreter/terminal_interface/utils/local_storage_path.py
diff --git a/interpreter/terminal_interface/utils/oi_dir.py b/archive/classic_interpreter/terminal_interface/utils/oi_dir.py
similarity index 100%
rename from interpreter/terminal_interface/utils/oi_dir.py
rename to archive/classic_interpreter/terminal_interface/utils/oi_dir.py
diff --git a/interpreter/terminal_interface/validate_llm_settings.py b/archive/classic_interpreter/terminal_interface/validate_llm_settings.py
similarity index 100%
rename from interpreter/terminal_interface/validate_llm_settings.py
rename to archive/classic_interpreter/terminal_interface/validate_llm_settings.py
diff --git a/tests/config.test.yaml b/archive/classic_tests/config.test.yaml
similarity index 100%
rename from tests/config.test.yaml
rename to archive/classic_tests/config.test.yaml
diff --git a/tests/core/computer/files/test_files.py b/archive/classic_tests/core/computer/files/test_files.py
similarity index 100%
rename from tests/core/computer/files/test_files.py
rename to archive/classic_tests/core/computer/files/test_files.py
diff --git a/tests/core/computer/test_computer.py b/archive/classic_tests/core/computer/test_computer.py
similarity index 100%
rename from tests/core/computer/test_computer.py
rename to archive/classic_tests/core/computer/test_computer.py
diff --git a/tests/core/test_async_core.py b/archive/classic_tests/core/test_async_core.py
similarity index 100%
rename from tests/core/test_async_core.py
rename to archive/classic_tests/core/test_async_core.py
diff --git a/tests/test_interpreter.py b/archive/classic_tests/test_interpreter.py
similarity index 100%
rename from tests/test_interpreter.py
rename to archive/classic_tests/test_interpreter.py
diff --git a/archive/cli-2.py b/archive/cli-2.py
deleted file mode 100644
index bd13780ea2..0000000000
--- a/archive/cli-2.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import importlib.util
-import os
-import sys
-
-import platformdirs
-
-from .main import run_async_main
-from .misc.help import help_message
-from .misc.welcome import welcome_message
-
-
-def main():
- oi_dir = platformdirs.user_config_dir("open-interpreter")
- profiles_dir = os.path.join(oi_dir, "profiles")
-
- # Get profile path from command line args
- profile = None
- for i, arg in enumerate(sys.argv):
- if arg == "--profile" and i + 1 < len(sys.argv):
- profile = sys.argv[i + 1]
- break
-
- if profile:
- if not os.path.isfile(profile):
- profile = os.path.join(profiles_dir, profile)
- if not os.path.isfile(profile):
- profile += ".py"
- if not os.path.isfile(profile):
- print(f"Invalid profile path: {profile}")
- exit(1)
-
- # Load the profile module from the provided path
- spec = importlib.util.spec_from_file_location("profile", profile)
- profile_module = importlib.util.module_from_spec(spec)
- spec.loader.exec_module(profile_module)
-
- # Get the interpreter from the profile
- interpreter = profile_module.interpreter
-
- if len(sys.argv) > 1 and sys.argv[1] == "--help":
- help_message()
- else:
- welcome_message()
- run_async_main()
diff --git a/archive/edit copy.py b/archive/edit copy.py
deleted file mode 100644
index 21fb77f0e4..0000000000
--- a/archive/edit copy.py
+++ /dev/null
@@ -1,295 +0,0 @@
-import json
-import os
-import random
-import sys
-
-from pygments import highlight
-from pygments.formatters import Terminal256Formatter
-from pygments.lexers import TextLexer, get_lexer_by_name
-from pygments.styles import get_all_styles
-from yaspin import yaspin
-from yaspin.spinners import Spinners
-
-
-class ContentRenderer:
- def __init__(self, style):
- self.buffer = ""
- self.started = False
- self.style = style
-
- def feed(self, content):
- pass
-
- def flush(self):
- pass
-
-
-class CodeRenderer(ContentRenderer):
- def __init__(self, style):
- super().__init__(style)
- self.line_number = 1
- self.code_lang = "python"
- self.buffer = ""
- self.rendered_content = ""
- self.spinner = yaspin(Spinners.simpleDots, text=" ")
- self.is_spinning = False
-
- def feed(self, content):
- # Start spinner if we have content to process
- if not self.is_spinning and content.strip():
- self.spinner.start()
- self.is_spinning = True
-
- # Only process the new part of the content
- if len(content) <= len(self.rendered_content):
- return
-
- # Get only the new content
- new_content = content[len(self.rendered_content) :]
- self.buffer += new_content
- self.rendered_content = content # Update what we've seen
-
- # Process complete lines
- if "\n" in self.buffer:
- lines = self.buffer.split("\n")
- for line in lines[:-1]:
- if self.is_spinning:
- self.spinner.stop()
- self.is_spinning = False
- self._render_line(line)
- if lines[-1].strip(): # If there's more content coming
- self.spinner.start()
- self.is_spinning = True
- self.buffer = lines[-1] # Keep the incomplete line
-
- def _render_line(self, line):
- try:
- lexer = get_lexer_by_name(self.code_lang)
- except:
- lexer = TextLexer()
-
- formatter = Terminal256Formatter(style=self.style)
-
- # Highlight the line
- highlighted = highlight(line + "\n", lexer, formatter).rstrip()
- line_prefix = f"{SchemaRenderer.GRAY_COLOR}{str(self.line_number).rjust(3)} │ {SchemaRenderer.RESET_COLOR}"
-
- sys.stdout.write(f"{line_prefix}{highlighted}\n")
- sys.stdout.flush()
- self.line_number += 1
-
- def flush(self):
- if self.is_spinning:
- self.spinner.stop()
- self.is_spinning = False
- if self.buffer:
- self._render_line(self.buffer)
- self.buffer = ""
-
-
-class PathRenderer(ContentRenderer):
- def __init__(self, style):
- super().__init__(style)
- self.rendered_content = "" # Track what we've already rendered
-
- def feed(self, content):
- # Only render new content
- new_content = content[len(self.rendered_content) :]
- if new_content:
- sys.stdout.write(f"{new_content}")
- sys.stdout.flush()
- self.rendered_content += new_content
-
-
-class CommandRenderer(ContentRenderer):
- ICONS = {
- "create": "✦",
- "view": "⚆",
- "str_replace": "↻",
- "insert": "⊹",
- "undo_edit": "↫",
- }
-
- def __init__(self, style):
- super().__init__(style)
- self.buffer = ""
- self.rendered_commands = set() # Track complete commands we've rendered
-
- def feed(self, content):
- # If we've already rendered this complete command, skip
- if content in self.rendered_commands:
- return
-
- # Buffer the content
- self.buffer = content
-
- # If this is a complete command (matches one of our icons), render it
- if content.strip() in self.ICONS:
- icon = self.ICONS.get(content.strip(), "•")
- ICON_COLOR = "\033[37m" # White color
- sys.stdout.write(
- f"{SchemaRenderer.GRAY_COLOR} {ICON_COLOR}{icon}\033[0m{SchemaRenderer.GRAY_COLOR} │ {content}{SchemaRenderer.RESET_COLOR} "
- )
- sys.stdout.flush()
- self.rendered_commands.add(content)
- self.buffer = ""
-
- def flush(self):
- pass # No need to flush since we render when we get a complete command
-
-
-class SchemaRenderer:
- GRAY_COLOR = "\033[38;5;240m"
- RESET_COLOR = "\033[0m"
-
- @staticmethod
- def print_separator(char="─", newline=True, line=True):
- terminal_width = os.get_terminal_size().columns
- if newline:
- sys.stdout.write("\n")
- if line:
- sys.stdout.write(
- f"{SchemaRenderer.GRAY_COLOR}────{char}"
- + "─" * (terminal_width - 5)
- + f"{SchemaRenderer.RESET_COLOR}\n"
- )
- else:
- sys.stdout.write(
- f"{SchemaRenderer.GRAY_COLOR} {char}{SchemaRenderer.RESET_COLOR}\n"
- )
-
- schemas = {
- "command": {
- "renderer": CommandRenderer,
- "before": lambda: SchemaRenderer.print_separator("┬"),
- },
- "path": {
- "renderer": PathRenderer,
- "before": lambda: None,
- },
- "content": {
- "renderer": CodeRenderer,
- "before": lambda: SchemaRenderer.print_separator("┼"),
- },
- }
-
-
-class CodeStreamView:
- def __init__(self):
- self.current_renderers = {}
- self.partial_json = ""
- self.code_style = random.choice(list(get_all_styles()))
- self.code_style = "monokai" # bw
- # print("Style:", self.code_style)
- self.current_schema = None
- self.current_json = None # Store the current parsed JSON state
-
- def _parse_json(self, json_chunk):
- # Add new chunk to existing buffer
- self.partial_json += json_chunk
-
- # Try to parse the complete buffer first
- try:
- result = json.loads(self.partial_json)
- self.current_json = result # Store the current state
- # Only clear buffer if we successfully parsed the entire thing
- if result.get("end", False):
- self.partial_json = ""
- return result
- except:
- pass
-
- # Rest of the method remains the same for handling incomplete JSON
- new_s = ""
- stack = []
- is_inside_string = False
- escaped = False
-
- # Process each character in the string one at a time.
- for char in self.partial_json:
- if is_inside_string:
- if char == '"' and not escaped:
- is_inside_string = False
- elif char == "\n" and not escaped:
- char = (
- "\\n" # Replace the newline character with the escape sequence.
- )
- elif char == "\\":
- escaped = not escaped
- else:
- escaped = False
- else:
- if char == '"':
- is_inside_string = True
- escaped = False
- elif char == "{":
- stack.append("}")
- elif char == "[":
- stack.append("]")
- elif char == "}" or char == "]":
- if stack and stack[-1] == char:
- stack.pop()
- else:
- # Mismatched closing character; the input is malformed.
- return None
-
- # Append the processed character to the new string.
- new_s += char
-
- # If we're still inside a string at the end of processing, we need to close the string.
- if is_inside_string:
- new_s += '"'
-
- # Close any remaining open structures in the reverse order that they were opened.
- for closing_char in reversed(stack):
- new_s += closing_char
-
- # Attempt to parse the modified string as JSON.
- try:
- result = json.loads(new_s)
- self.current_json = result # Store the current state
- # Only clear buffer if we successfully parsed a complete message
- if result.get("end", False):
- self.partial_json = ""
- return result
- except:
- # Don't print the failure message since it's expected for incomplete JSON
- return None
-
- def feed(self, chunk):
- json_obj = self._parse_json(chunk)
- if not json_obj:
- return
-
- # Process the JSON object
- for schema_type, schema in SchemaRenderer.schemas.items():
- if schema_type in json_obj:
- # If this is a new schema type, initialize it
- if schema_type not in self.current_renderers:
- if schema["before"]:
- schema["before"]()
- self.current_renderers[schema_type] = schema["renderer"](
- self.code_style
- )
-
- # Feed the content to the renderer
- self.current_renderers[schema_type].feed(json_obj[schema_type])
-
- # If this is the end of the content, flush and cleanup
- if json_obj.get("end", False):
- self.current_renderers[schema_type].flush()
- if schema["after"]:
- schema["after"]()
- del self.current_renderers[schema_type]
-
- def close(self):
- # Flush any remaining content
- for renderer in self.current_renderers.values():
- renderer.flush()
- self.current_renderers.clear()
-
- # Print horizontal separator with newline based on command type
- if self.current_json.get("command") == "view":
- SchemaRenderer.print_separator("┴", newline=True)
- else:
- SchemaRenderer.print_separator("┴", newline=False)
diff --git a/archive/main.py b/archive/main.py
deleted file mode 100755
index 7051d44583..0000000000
--- a/archive/main.py
+++ /dev/null
@@ -1,860 +0,0 @@
-"""
-Based on Anthropic's computer use example at https://github.com/anthropics/anthropic-quickstarts/blob/main/computer-use-demo/computer_use_demo/loop.py
-"""
-
-import asyncio
-import dataclasses
-import json
-import os
-import platform
-import sys
-import threading
-import time
-import traceback
-import uuid
-from collections.abc import Callable
-from datetime import datetime
-
-import pyautogui
-from prompt_toolkit import PromptSession
-from prompt_toolkit.formatted_text import HTML
-
-from .misc.desktop import desktop_prompt
-from .ui.markdown import MarkdownRenderer
-
-try:
- from enum import StrEnum
-except ImportError: # 3.10 compatibility
- from enum import Enum as StrEnum
-
-from typing import Any, List, cast
-
-from anthropic import Anthropic, AnthropicBedrock, AnthropicVertex
-from anthropic.types import ToolResultBlockParam
-from anthropic.types.beta import (
- BetaCacheControlEphemeralParam,
- BetaContentBlock,
- BetaContentBlockParam,
- BetaImageBlockParam,
- BetaMessage,
- BetaMessageParam,
- BetaRawContentBlockDeltaEvent,
- BetaRawContentBlockStartEvent,
- BetaRawContentBlockStopEvent,
- BetaTextBlock,
- BetaTextBlockParam,
- BetaToolResultBlockParam,
- BetaToolUseBlockParam,
-)
-from yaspin import yaspin
-from yaspin.spinners import Spinners
-
-from .tools import BashTool, ComputerTool, EditTool, ToolCollection, ToolResult
-from .ui.tool import ToolRenderer
-
-os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"
-import litellm
-
-md = MarkdownRenderer()
-
-COMPUTER_USE_BETA_FLAG = "computer-use-2024-10-22"
-PROMPT_CACHING_BETA_FLAG = "prompt-caching-2024-07-31"
-
-from typing import List, Optional
-
-import uvicorn
-from fastapi import FastAPI
-from fastapi.responses import StreamingResponse
-from pydantic import BaseModel
-
-# Add these near the top with other global variables
-approved_paths = set() # Store approved file paths
-approved_commands = set() # Store approved bash commands
-
-# Add this near the top of the file, with other imports and global variables # <- this is from anthropic but it sounds so cursor lmao
-messages: List[BetaMessageParam] = []
-
-
-class APIProvider(StrEnum):
- ANTHROPIC = "anthropic"
- BEDROCK = "bedrock"
- VERTEX = "vertex"
-
-
-PROVIDER_TO_DEFAULT_MODEL_NAME: dict[APIProvider, str] = {
- APIProvider.ANTHROPIC: "claude-3-5-sonnet-20241022",
- APIProvider.BEDROCK: "anthropic.claude-3-5-sonnet-20241022-v2:0",
- APIProvider.VERTEX: "claude-3-5-sonnet-v2@20241022",
-}
-
-
-# This system prompt is optimized for the Docker environment in this repository and
-# specific tool combinations enabled.
-# We encourage modifying this system prompt to ensure the model has context for the
-# environment it is running in, and to provide any additional information that may be
-# helpful for the task at hand.
-
-SYSTEM_PROMPT = f"""
-* You are an AI assistant with access to a machine running on {"Mac OS" if platform.system() == "Darwin" else platform.system()} with internet access.
-* Computer function calls take a while to run and return their results. Where possible, chain multiple of these calls into a single function-call request.
-* The current date is {datetime.today().strftime('%A, %B %d, %Y')}.
-* The user's cwd is {os.getcwd()} and username is {os.getlogin()}.
-"""
-
-# Update the SYSTEM_PROMPT for Mac OS
-if platform.system() == "Darwin":
- SYSTEM_PROMPT += """
-
-* Open applications using Spotlight by using the computer tool to simulate pressing Command+Space, typing the application name, and pressing Enter.
-"""
-
-
-async def respond(
- *,
- model: str = "claude-3-5-sonnet-20241022",
- provider: APIProvider,
- messages: list[BetaMessageParam],
- api_key: str,
- only_n_most_recent_images: int | None = None,
- max_tokens: int = 4096,
- auto_approve: bool = False,
- tools: list[str] = [],
-):
- """
- Agentic sampling loop for the assistant/tool interaction of computer use.
- """
-
-    # Build tool instances from the requested tool names.
-    tool_names = tools
-    tools = []
-    if "interpreter" in tool_names:
-        tools.append(BashTool())
-    if "editor" in tool_names:
-        tools.append(EditTool())
-    if "gui" in tool_names:
-        tools.append(ComputerTool())
-
- tool_collection = ToolCollection(*tools)
- system = BetaTextBlockParam(
- type="text",
- text=SYSTEM_PROMPT,
- )
-
- while True:
- spinner = yaspin(Spinners.simpleDots, text="")
- spinner.start()
-
- enable_prompt_caching = False
- betas = [COMPUTER_USE_BETA_FLAG]
- image_truncation_threshold = 10
- if provider == APIProvider.ANTHROPIC:
- if api_key:
- client = Anthropic(api_key=api_key)
- else:
- client = Anthropic()
- enable_prompt_caching = True
- elif provider == APIProvider.VERTEX:
- client = AnthropicVertex()
- elif provider == APIProvider.BEDROCK:
- client = AnthropicBedrock()
- else:
- client = Anthropic()
-
- if enable_prompt_caching:
- betas.append(PROMPT_CACHING_BETA_FLAG)
- # _inject_prompt_caching(messages)
- # Is it ever worth it to bust the cache with prompt caching?
- image_truncation_threshold = 50
- system["cache_control"] = {"type": "ephemeral"}
-
- if only_n_most_recent_images:
- _maybe_filter_to_n_most_recent_images(
- messages,
- only_n_most_recent_images,
- min_removal_threshold=image_truncation_threshold,
- )
-
- edit = ToolRenderer()
-
- # Call the API
-        # we use raw_response to provide debug information to streamlit. Your
-        # implementation may be able to call the SDK directly with:
- # `response = client.messages.create(...)` instead.
-
- try:
- use_anthropic = (
- litellm.get_model_info(model)["litellm_provider"] == "anthropic"
- )
- except:
- use_anthropic = False
-
- if use_anthropic:
- # Use Anthropic API which supports betas
- raw_response = client.beta.messages.create(
- max_tokens=max_tokens,
- messages=messages,
- model=model,
- system=system["text"],
- tools=tool_collection.to_params(),
- betas=betas,
- stream=True,
- )
-
- response_content = []
- current_block = None
- first_token = True
-
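-            # Streaming events: Start opens a content block, Delta carries text or
-            # tool-call JSON fragments, Stop finalizes the block.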
- for chunk in raw_response:
- if first_token:
- spinner.stop()
- first_token = False
-
- if isinstance(chunk, BetaRawContentBlockStartEvent):
- current_block = chunk.content_block
- elif isinstance(chunk, BetaRawContentBlockDeltaEvent):
- if chunk.delta.type == "text_delta":
- # print(f"{chunk.delta.text}", end="", flush=True)
- md.feed(chunk.delta.text)
- yield {"type": "chunk", "chunk": chunk.delta.text}
- await asyncio.sleep(0)
- if current_block and current_block.type == "text":
- current_block.text += chunk.delta.text
- elif chunk.delta.type == "input_json_delta":
- # Initialize partial_json if needed
- if not hasattr(current_block, "partial_json"):
- current_block.partial_json = ""
- current_block.parsed_json = {}
- current_block.current_key = None
- current_block.current_value = ""
-
- # Add new JSON delta
- current_block.partial_json += chunk.delta.partial_json
-
- # print(chunk.delta.partial_json)
-
- # If name attribute is present on current_block:
- if hasattr(current_block, "name"):
- if edit.name == None:
- edit.name = current_block.name
- edit.feed(chunk.delta.partial_json)
-
- elif isinstance(chunk, BetaRawContentBlockStopEvent):
- edit.close()
- edit = ToolRenderer()
- if current_block:
- if hasattr(current_block, "partial_json"):
- # Finished a tool call
- # print()
- current_block.input = json.loads(current_block.partial_json)
- # yield {"type": "chunk", "chunk": current_block.input}
- delattr(current_block, "partial_json")
- else:
- # Finished a message
- # print("\n")
- md.feed("\n")
- yield {"type": "chunk", "chunk": "\n"}
- await asyncio.sleep(0)
- # Clean up any remaining attributes from partial processing
- if current_block:
- for attr in [
- "partial_json",
- "parsed_json",
- "current_key",
- "current_value",
- ]:
- if hasattr(current_block, attr):
- delattr(current_block, attr)
- response_content.append(current_block)
- current_block = None
-
- response = BetaMessage(
- id=str(uuid.uuid4()),
- content=response_content,
- role="assistant",
- model=model,
- stop_reason=None,
- stop_sequence=None,
- type="message",
- usage={
- "input_tokens": 0,
- "output_tokens": 0,
- }, # Add a default usage dictionary
- )
-
- messages.append(
- {
- "role": "assistant",
- "content": cast(list[BetaContentBlockParam], response.content),
- }
- )
-
- user_approval = None
-
- if auto_approve:
- user_approval = "y"
- else:
- # If not in terminal, break
- if not sys.stdin.isatty():
- # Error out
- print(
- "Error: You appear to be running in a non-interactive environment, so cannot approve tools. Add the `-y` flag to automatically approve tools in non-interactive environments."
- )
- # Exit
- exit(1)
-
- content_blocks = cast(list[BetaContentBlock], response.content)
- tool_use_blocks = [b for b in content_blocks if b.type == "tool_use"]
- if len(tool_use_blocks) > 1:
- print(f"\n\033[38;5;240mRun all actions above\033[0m?")
- user_approval = input("\n(y/n/a): ").lower().strip()
- elif len(tool_use_blocks) == 1:
- auto_approved = False
- if tool_use_blocks[0].name == "str_replace_editor":
- path = tool_use_blocks[0].input.get("path")
- if path.startswith(os.getcwd()):
- path = path[len(os.getcwd()) + 1 :]
- if path == "":
- path = "/"
-
- # Check if path is already approved
- if path in approved_paths:
- user_approval = "y"
- auto_approved = True
- else:
- if tool_use_blocks[0].input.get("command") == "create":
- print(
- f"\n\033[38;5;240mCreate \033[0m{path}\033[38;5;240m?\033[0m"
- )
- elif tool_use_blocks[0].input.get("command") == "view":
- print(
- f"\n\033[38;5;240mView \033[0m{path}\033[38;5;240m?\033[0m"
- )
- elif tool_use_blocks[0].input.get("command") in [
- "str_replace",
- "insert",
- ]:
- print(
- f"\n\033[38;5;240mEdit \033[0m{path}\033[38;5;240m?\033[0m"
- )
- elif tool_use_blocks[0].name == "bash":
- command = tool_use_blocks[0].input.get("command")
- # Check if command is already approved
- if command in approved_commands:
- user_approval = "y"
- auto_approved = True
- else:
- print(f"\n\033[38;5;240mRun code?\033[0m")
- else:
- print(f"\n\033[38;5;240mRun tool?\033[0m")
-
- if not auto_approved:
- user_approval = input("\n(y/n/a): ").lower().strip()
-
- # Add to approved list if 'a' was pressed
- if user_approval == "a":
- if tool_use_blocks[0].name == "str_replace_editor":
- approved_paths.add(path)
- print(
- f"\033[38;5;240mAdded {path} to approved paths\033[0m"
- )
- elif tool_use_blocks[0].name == "bash":
- approved_commands.add(command)
- print(
- f"\033[38;5;240mAdded '{command}' to approved commands\033[0m"
- )
- user_approval = "y"
-
- tool_result_content: list[BetaToolResultBlockParam] = []
- for content_block in cast(list[BetaContentBlock], response.content):
- if content_block.type == "tool_use":
- # Ask user if they want to create the file
- # path = "/tmp/test_file.txt"
- # print(f"\n\033[38;5;240m Create \033[0m\033[1m{path}\033[0m?")
- # response = input(f"\n\033[38;5;240m Create \033[0m\033[1m{path}\033[0m?" + " (y/n): ").lower().strip()
- # Ask user for confirmation before running tool
- edit.close()
-
- if user_approval == "y":
- result = await tool_collection.run(
- name=content_block.name,
- tool_input=cast(dict[str, Any], content_block.input),
- )
- else:
- result = ToolResult(output="Tool execution cancelled by user")
- tool_result_content.append(
- _make_api_tool_result(result, content_block.id)
- )
-
- if user_approval == "n":
- messages.append({"content": tool_result_content, "role": "user"})
- yield {"type": "messages", "messages": messages}
- break
-
- if not tool_result_content:
- # Done!
- yield {"type": "messages", "messages": messages}
- break
-
- if use_anthropic:
- messages.append({"content": tool_result_content, "role": "user"})
- else:
- messages.append({"content": tool_result_content, "role": "tool"})
-
- else:
- # Use Litellm
- tools = [
- {
- "type": "function",
- "function": {
- "name": "bash",
- "description": """Run commands in a bash shell\n
- * When invoking this tool, the contents of the \"command\" parameter does NOT need to be XML-escaped.\n
- * You don't have access to the internet via this tool.\n
- * You do have access to a mirror of common linux and python packages via apt and pip.\n
- * State is persistent across command calls and discussions with the user.\n
- * To inspect a particular line range of a file, e.g. lines 10-25, try 'sed -n 10,25p /path/to/the/file'.\n
- * Please avoid commands that may produce a very large amount of output.\n
- * Please run long lived commands in the background, e.g. 'sleep 10 &' or start a server in the background.""",
- "parameters": {
- "type": "object",
- "properties": {
- "command": {
- "type": "string",
- "description": "The bash command to run.",
- }
- },
- "required": ["command"],
- },
- },
- },
- {
- "type": "function",
- "function": {
- "name": "str_replace_editor",
- "description": """Custom editing tool for viewing, creating and editing files\n
- * If `path` is a file, `view` displays the result of applying `cat -n`. If `path` is a directory, `view` lists non-hidden files and directories up to 2 levels deep\n
- * The `create` command cannot be used if the specified `path` already exists as a file\n
- * If a `command` generates a long output, it will be truncated and marked with `` \n
- * The `old_str` parameter should match EXACTLY one or more consecutive lines from the original file. Be mindful of whitespaces!\n
- * If the `old_str` parameter is not unique in the file, the replacement will not be performed. Make sure to include enough context in `old_str` to make it unique\n
- * The `new_str` parameter should contain the edited lines that should replace the `old_str`""",
- "parameters": {
- "type": "object",
- "properties": {
- "command": {
- "description": "The commands to run. Allowed options are: `view`, `create`, `str_replace`, `insert`, `undo_edit`.",
- "enum": [
- "view",
- "create",
- "str_replace",
- "insert",
- "undo_edit",
- ],
- "type": "string",
- },
- "file_text": {
- "description": "Required parameter of `create` command, with the content of the file to be created.",
- "type": "string",
- },
- "insert_line": {
- "description": "Required parameter of `insert` command. The `new_str` will be inserted AFTER the line `insert_line` of `path`.",
- "type": "integer",
- },
- "new_str": {
- "description": "Optional parameter of `str_replace` command containing the new string (if not given, no string will be added). Required parameter of `insert` command containing the string to insert.",
- "type": "string",
- },
- "old_str": {
- "description": "Required parameter of `str_replace` command containing the string in `path` to replace.",
- "type": "string",
- },
- "path": {
- "description": "Absolute path to file or directory, e.g. `/repo/file.py` or `/repo`.",
- "type": "string",
- },
- "view_range": {
- "description": "Optional parameter of `view` command when `path` points to a file. If none is given, the full file is shown. If provided, the file will be shown in the indicated line number range, e.g. [11, 12] will show lines 11 and 12. Indexing at 1 to start. Setting `[start_line, -1]` shows all lines from `start_line` to the end of the file.",
- "type": "array",
- "items": {"type": "integer"},
- },
- },
- "required": ["command", "path"],
- },
- },
- },
- ]
-
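-            # Keep only the first tool definition (bash) for now.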
- tools = tools[:1]
-
- if model.startswith("ollama/"):
- stream = False
- # Ollama doesn't support tool calling + streaming
-                # Also, litellm doesn't seem to work here
- actual_model = model.replace("ollama/", "openai/")
- api_base = "http://localhost:11434/v1/"
- else:
- stream = True
- api_base = None
- actual_model = model
-
- params = {
- "model": actual_model,
- "messages": [{"role": "system", "content": system["text"]}] + messages,
- # "tools": tools,
- "stream": stream,
- # "max_tokens": max_tokens,
- "api_base": api_base,
- # "drop_params": True,
- "temperature": 0.0,
- }
-
- raw_response = litellm.completion(**params)
- print(raw_response)
-
- if not stream:
- # Simulate streaming
- raw_response.choices[0].delta = raw_response.choices[0].message
- raw_response = [raw_response]
-
- message = None
- first_token = True
-
- for chunk in raw_response:
- if first_token:
- spinner.stop()
- first_token = False
-
- if message == None:
- message = chunk.choices[0].delta
-
- if chunk.choices[0].delta.content:
- md.feed(chunk.choices[0].delta.content)
- yield {"type": "chunk", "chunk": chunk.choices[0].delta.content}
- await asyncio.sleep(0)
-
- # If the delta == message, we're on the first block, so this content is already in there
- if chunk.choices[0].delta != message:
- message.content += chunk.choices[0].delta.content
- if chunk.choices[0].delta.tool_calls:
- if chunk.choices[0].delta.tool_calls[0].id:
- if message.tool_calls == None or chunk.choices[
- 0
- ].delta.tool_calls[0].id not in [
- t.id for t in message.tool_calls
- ]:
- edit.close()
- edit = ToolRenderer()
- if message.tool_calls == None:
- message.tool_calls = []
- message.tool_calls.append(
- chunk.choices[0].delta.tool_calls[0]
- )
- current_tool_call = [
- t
- for t in message.tool_calls
- if t.id == chunk.choices[0].delta.tool_calls[0].id
- ][0]
-
- if chunk.choices[0].delta.tool_calls[0].function.name:
- tool_name = chunk.choices[0].delta.tool_calls[0].function.name
- if edit.name == None:
- edit.name = tool_name
- if current_tool_call.function.name == None:
- current_tool_call.function.name = tool_name
- if chunk.choices[0].delta.tool_calls[0].function.arguments:
- arguments_delta = (
- chunk.choices[0].delta.tool_calls[0].function.arguments
- )
- edit.feed(arguments_delta)
-
- # If the delta == message, we're on the first block, so this arguments_delta is already in there
- if chunk.choices[0].delta != message:
- current_tool_call.function.arguments += arguments_delta
-
- if chunk.choices[0].finish_reason:
- edit.close()
- edit = ToolRenderer()
-
- messages.append(message)
-
- print()
-
- if not message.tool_calls:
- yield {"type": "messages", "messages": messages}
- break
-
- user_approval = input("\nRun tool(s)? (y/n): ").lower().strip()
-
- for tool_call in message.tool_calls:
- function_arguments = json.loads(tool_call.function.arguments)
-
- if user_approval == "y":
- result = await tool_collection.run(
- name=tool_call.function.name,
- tool_input=cast(dict[str, Any], function_arguments),
- )
- else:
- result = ToolResult(output="Tool execution cancelled by user")
-
- messages.append(
- {
- "role": "tool",
- "content": json.dumps(dataclasses.asdict(result)),
- "tool_call_id": tool_call.id,
- }
- )
-
-
-def _maybe_filter_to_n_most_recent_images(
- messages: list[BetaMessageParam],
- images_to_keep: int,
- min_removal_threshold: int = 5,
-):
- """
- With the assumption that images are screenshots that are of diminishing value as
- the conversation progresses, remove all but the final `images_to_keep` tool_result
- images in place, with a chunk of min_removal_threshold to reduce the amount we
- break the implicit prompt cache.
- """
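-    # Example: with 13 images and images_to_keep=2, images_to_remove is 11, which
-    # rounds down to 10 (a multiple of min_removal_threshold=5), so 3 images remain.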
- if images_to_keep is None:
- return messages
-
- tool_result_blocks = cast(
- list[ToolResultBlockParam],
- [
- item
- for message in messages
- for item in (
- message["content"] if isinstance(message["content"], list) else []
- )
- if isinstance(item, dict) and item.get("type") == "tool_result"
- ],
- )
-
- total_images = sum(
- 1
- for tool_result in tool_result_blocks
- for content in tool_result.get("content", [])
- if isinstance(content, dict) and content.get("type") == "image"
- )
-
- images_to_remove = total_images - images_to_keep
- # for better cache behavior, we want to remove in chunks
- images_to_remove -= images_to_remove % min_removal_threshold
-
- for tool_result in tool_result_blocks:
- if isinstance(tool_result.get("content"), list):
- new_content = []
- for content in tool_result.get("content", []):
- if isinstance(content, dict) and content.get("type") == "image":
- if images_to_remove > 0:
- images_to_remove -= 1
- continue
- new_content.append(content)
- tool_result["content"] = new_content
-
-
-def _response_to_params(
- response: BetaMessage,
-) -> list[BetaTextBlockParam | BetaToolUseBlockParam]:
- res: list[BetaTextBlockParam | BetaToolUseBlockParam] = []
- for block in response.content:
- if isinstance(block, BetaTextBlock):
- res.append({"type": "text", "text": block.text})
- else:
- res.append(cast(BetaToolUseBlockParam, block.model_dump()))
- return res
-
-
-def _inject_prompt_caching(
- messages: list[BetaMessageParam],
-):
- """
- Set cache breakpoints for the 3 most recent turns
- one cache breakpoint is left for tools/system prompt, to be shared across sessions
- """
-
- breakpoints_remaining = 3
- for message in reversed(messages):
- if message["role"] == "user" and isinstance(
- content := message["content"], list
- ):
- if breakpoints_remaining:
- breakpoints_remaining -= 1
- content[-1]["cache_control"] = BetaCacheControlEphemeralParam(
- {"type": "ephemeral"}
- )
- else:
- content[-1].pop("cache_control", None)
-                # we'll only ever have one extra turn per loop
- break
-
-
-def _make_api_tool_result(
- result: ToolResult, tool_use_id: str
-) -> BetaToolResultBlockParam:
- """Convert an agent ToolResult to an API ToolResultBlockParam."""
- tool_result_content: list[BetaTextBlockParam | BetaImageBlockParam] | str = []
- is_error = False
- if result.error:
- is_error = True
- tool_result_content = _maybe_prepend_system_tool_result(result, result.error)
- else:
- if result.output:
- tool_result_content.append(
- {
- "type": "text",
- "text": _maybe_prepend_system_tool_result(result, result.output),
- }
- )
- if result.base64_image:
- tool_result_content.append(
- {
- "type": "image",
- "source": {
- "type": "base64",
- "media_type": "image/png",
- "data": result.base64_image,
- },
- }
- )
- return {
- "type": "tool_result",
- "content": tool_result_content,
- "tool_use_id": tool_use_id,
- "is_error": is_error,
- }
-
-
-def _maybe_prepend_system_tool_result(result: ToolResult, result_text: str):
- if result.system:
- result_text = f"{result.system}\n{result_text}"
- return result_text
-
-
-async def async_main(args):
- messages = []
- global exit_flag
-
- # Start the mouse position checking thread
- mouse_thread = threading.Thread(target=check_mouse_position)
- mouse_thread.daemon = True
- mouse_thread.start()
-
- while not exit_flag:
- # If is atty, get input from user
-        placeholder_color = "ansigray"
-
- if args["input_message"]:
- user_input = args["input_message"]
- args["input_message"] = None
- elif sys.stdin.isatty():
- placeholder = HTML(
-                f'<{placeholder_color}>Use """ for multi-line prompts</{placeholder_color}>'
- )
- # placeholder = HTML(' Send a message (/? for help)')
- session = PromptSession()
- # Initialize empty message for multi-line input
- user_input = ""
- if len(messages) < 3:
- first_line = await session.prompt_async("> ", placeholder=placeholder)
- else:
- first_line = input("> ")
-
- # Check if starting multi-line input
- if first_line.strip() == '"""':
- while True:
- placeholder = HTML(
-                        f'<{placeholder_color}>Use """ again to finish</{placeholder_color}>'
- )
- line = await session.prompt_async("", placeholder=placeholder)
- if line.strip().endswith('"""'):
- break
- user_input += line + "\n"
- else:
- user_input = first_line
- print()
- else:
- # Read from stdin when not in terminal
- user_input = sys.stdin.read().strip()
-
- if user_input.lower() in ["exit", "quit", "q"]:
- break
- elif user_input.lower() in ["d"]:
- desktop_prompt()
- continue
-
- messages.append(
- {"role": "user", "content": [{"type": "text", "text": user_input}]}
- )
-
- try:
- async for chunk in respond(
- model=args["model"],
- provider=args.get("provider"),
- messages=messages,
- api_key=args["api_key"],
- auto_approve=args["auto_run"],
- ):
- if chunk["type"] == "messages":
- messages = chunk["messages"]
- except asyncio.CancelledError: # So weird but this happens on the first ctrl C
- continue
- except KeyboardInterrupt: # Then this happens on all subsequent ctrl Cs?
- continue
-
- # If not in terminal, break
- if not sys.stdin.isatty():
- break
-
- print()
-
- # The thread will automatically terminate when the main program exits
-
-
-def run(args):
- if "--server" in sys.argv:
- # Start uvicorn server directly without asyncio.run()
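-        # async_main is expected to build and return the FastAPI app in server mode
-        # (see archive/server.txt for the route it registers).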
- app = asyncio.run(async_main(args))
- uvicorn.run(app, host="0.0.0.0", port=8000)
- else:
- try:
- asyncio.run(async_main(args))
- except KeyboardInterrupt:
- print()
- pass
-
-
-# Global flag and helper function for mouse-corner exit tracking
-exit_flag = False
-
-
-def check_mouse_position():
- global exit_flag
- corner_threshold = 10
- screen_width, screen_height = pyautogui.size()
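-    # Failsafe: moving the mouse into any screen corner exits the program,
-    # similar to pyautogui's built-in FAILSAFE behavior.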
-
- while not exit_flag:
- x, y = pyautogui.position()
- if (
- (x <= corner_threshold and y <= corner_threshold)
- or (x <= corner_threshold and y >= screen_height - corner_threshold)
- or (x >= screen_width - corner_threshold and y <= corner_threshold)
- or (
- x >= screen_width - corner_threshold
- and y >= screen_height - corner_threshold
- )
- ):
- exit_flag = True
- print("\nMouse moved to corner. Exiting...")
- os._exit(0)
- threading.Event().wait(0.1) # Check every 100ms
-
-
-class ChatMessage(BaseModel):
- role: str
- content: str
-
-
-class ChatCompletionRequest(BaseModel):
- messages: List[ChatMessage]
- stream: Optional[bool] = False
diff --git a/archive/mintlify_doc_generator.py b/archive/mintlify_doc_generator.py
deleted file mode 100644
index b947ec75b7..0000000000
--- a/archive/mintlify_doc_generator.py
+++ /dev/null
@@ -1,245 +0,0 @@
-import ast
-import os
-import sys
-from datetime import datetime
-
-
-def get_docstring(node):
- """Get the docstring from an AST node."""
- return ast.get_docstring(node) or ""
-
-
-def process_node(node, depth=0):
- """Process an AST node and return markdown documentation."""
- docs = []
-
- if isinstance(node, ast.ClassDef):
- # Document class
- docs.append(f"## {node.name}\n")
- class_doc = get_docstring(node)
- if class_doc:
- docs.append(f"{class_doc}\n")
-
- # Process class methods
- for item in node.body:
- if isinstance(item, (ast.FunctionDef, ast.AsyncFunctionDef)):
- docs.extend(process_node(item, depth + 1))
-
- elif isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
- # Document function/method
- method_name = node.name
- if not method_name.startswith("_") or method_name == "__init__":
- docs.append(f"### {method_name}\n")
- func_doc = get_docstring(node)
- if func_doc:
- # Format docstring for parameters and examples
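-                # Simple line-by-line state machine: "Parameters" and "Example"
-                # headings toggle modes that control how following lines are emitted.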
- lines = func_doc.split("\n")
- formatted_lines = []
- in_parameters = False
- in_example = False
-
- for line in lines:
- if line.strip().startswith("Parameters"):
- in_parameters = True
- formatted_lines.append("\n**Parameters**\n")
- elif line.strip().startswith("Example"):
- in_example = True
- formatted_lines.append("\n**Example**\n")
- formatted_lines.append("```python")
- elif in_example and line.strip() == "":
- formatted_lines.append("```\n")
- in_example = False
- elif in_parameters and line.strip() == "":
- in_parameters = False
- formatted_lines.append("")
- elif in_parameters:
- # Format parameter lines
- parts = line.strip().split(":")
- if len(parts) > 1:
- param = parts[0].strip()
- desc = ":".join(parts[1:]).strip()
- formatted_lines.append(f"- `{param}`: {desc}")
- else:
- formatted_lines.append(line)
- else:
- formatted_lines.append(line)
-
- if in_example:
- formatted_lines.append("```\n")
-
- docs.append("\n".join(formatted_lines) + "\n")
-
- return docs
-
-
-def generate_markdown(file_path):
- """Generate Mintlify-compatible MDX documentation for a Python file."""
- try:
- with open(file_path, "r", encoding="utf-8") as f:
- content = f.read()
-
- # Parse the Python file
- tree = ast.parse(content)
-
- # Get module docstring
- module_doc = get_docstring(tree) or ""
-
- # Create frontmatter
- filename = os.path.basename(file_path)
- title = filename.replace(".py", "").replace("_", " ").title()
-
- frontmatter = [
- "---",
- f'title: "{title}"',
- f'description: "Documentation for {filename}"',
- "api: false",
- "---\n",
- ]
-
- # Start with module docstring
- docs = []
- if module_doc:
- docs.append(f"# Overview\n")
- docs.append(f"{module_doc}\n")
-
- # Process all nodes
- for node in tree.body:
- if isinstance(node, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)):
- docs.extend(process_node(node))
-
- return "\n".join(frontmatter + docs)
-
- except Exception as e:
- return f"Error processing {file_path}: {str(e)}"
-
-
-def create_mintjson():
- """Create mint.json configuration file."""
- config = {
- "name": "Interpreter 1",
- "logo": {"dark": "/logo/dark.png", "light": "/logo/light.png"},
- "favicon": "/favicon.png",
- "colors": {
- "primary": "#0D9373",
- "light": "#07C983",
- "dark": "#0D9373",
- "anchors": {"from": "#0D9373", "to": "#07C983"},
- },
- "topbarLinks": [
- {
- "name": "GitHub",
- "url": "https://github.com/KillianLucas/open-interpreter",
- }
- ],
- "topbarCtaButton": {
- "name": "Get Started",
- "url": "https://docs.openinterpreter.com/introduction",
- },
- "navigation": [
- {"group": "Getting Started", "pages": ["introduction", "quickstart"]},
- {
- "group": "Core Components",
- "pages": ["interpreter", "cli", "profiles", "server"],
- },
- {
- "group": "Tools",
- "pages": [
- "tools/base",
- "tools/bash",
- "tools/computer",
- "tools/edit",
- "tools/run",
- ],
- },
- {"group": "UI Components", "pages": ["ui/markdown", "ui/tool"]},
- ],
- }
-
- import json
-
- with open("docs/mint.json", "w") as f:
- json.dump(config, f, indent=2)
-
-
-def main():
- # Create docs directory
- os.makedirs("docs", exist_ok=True)
-
- # Create introduction and quickstart
- intro_content = """---
-title: "Introduction"
-description: "Welcome to the Open Interpreter documentation"
----
-
-# Introduction
-
-Open Interpreter is a natural language interface for your computer. It provides an intuitive way to interact with your system using natural language commands.
-
-## Features
-
-- Natural language processing of commands
-- Secure execution environment
-- Multiple language model support
-- Extensible tool system
-"""
-
- quickstart_content = """---
-title: "Quickstart"
-description: "Get started with Open Interpreter"
----
-
-# Quickstart
-
-Get started with Open Interpreter in minutes.
-
-## Installation
-
-```bash
-pip install open-interpreter
-```
-
-## Basic Usage
-
-```python
-from interpreter import Interpreter
-
-interpreter = Interpreter()
-interpreter.chat("Hello, what can you help me with?")
-```
-"""
-
- with open("docs/introduction.md", "w") as f:
- f.write(intro_content)
-
- with open("docs/quickstart.md", "w") as f:
- f.write(quickstart_content)
-
- # Get all Python files in interpreter_1
- base_path = "interpreter_1"
- for root, _, files in os.walk(base_path):
- for file in files:
- if file.endswith(".py"):
- file_path = os.path.join(root, file)
- # Generate markdown
- markdown = generate_markdown(file_path)
- # Create relative output path
- rel_path = os.path.relpath(file_path, base_path)
- output_path = os.path.join("docs", rel_path.replace(".py", ".mdx"))
- # Ensure output directory exists
- os.makedirs(os.path.dirname(output_path), exist_ok=True)
- # Write MDX file
- with open(output_path, "w", encoding="utf-8") as f:
- f.write(markdown)
- print(f"Generated docs for {file_path}")
-
- # Convert introduction and quickstart to .mdx
- os.rename("docs/introduction.md", "docs/introduction.mdx")
- os.rename("docs/quickstart.md", "docs/quickstart.mdx")
-
- # Create mint.json
- create_mintjson()
- print("Generated mint.json configuration")
-
-
-if __name__ == "__main__":
- main()
diff --git a/archive/pyproject.toml b/archive/pyproject.toml
deleted file mode 100644
index e364dd6c3d..0000000000
--- a/archive/pyproject.toml
+++ /dev/null
@@ -1,111 +0,0 @@
-[tool.poetry]
-name = "open-interpreter"
-packages = [
- {include = "interpreter"},
- {include = "scripts"},
- {include = "interpreter_1"},
-]
-version = "0.4.4" # Use "-rc1", "-rc2", etc. for pre-release versions
-description = "Let language models run code"
-authors = ["Killian Lucas "]
-readme = "README.md"
-
-[tool.poetry.dependencies]
-
-# Optional [os] dependencies
-opencv-python = { version = "^4.8.1.78", optional = true }
-plyer = { version = "^2.1.0", optional = true }
-pywinctl = { version = "^0.3", optional = true }
-pytesseract = { version = "^0.3.10", optional = true }
-sentence-transformers = { version = "^2.5.1", optional = true }
-nltk = { version = "^3.8.1", optional = true }
-ipywidgets = { version = "^8.1.2", optional = true }
-torch = { version = "^2.2.1", optional = true }
-timm = { version = "^0.9.16", optional = true }
-
-# Optional [safe] dependencies
-semgrep = { version = "^1.52.0", optional = true }
-
-# Optional [local] dependencies
-transformers = { version = "4.41.2", optional = true }
-einops = { version = "^0.8.0", optional = true }
-torchvision = { version = "^0.18.0", optional = true }
-easyocr = { version = "^1.7.1", optional = true }
-
-# Optional [server] dependencies
-janus = { version = "^1.0.0", optional = true }
-
-# Required dependencies
-python = ">=3.9,<4"
-setuptools = "*"
-astor = "^0.8.1"
-git-python = "^1.0.3"
-inquirer = "^3.1.3"
-pyyaml = "^6.0.1"
-rich = "^13.4.2"
-six = "^1.16.0"
-tokentrim = "^0.1.13"
-wget = "^3.2"
-psutil = "^5.9.6"
-pyreadline3 = {version = "^3.4.1", markers = "sys_platform == 'win32'"}
-html2image = "^2.0.4.3"
-send2trash = "^1.8.2"
-ipykernel = "^6.26.0"
-jupyter-client = "^8.6.0"
-matplotlib = "^3.8.2"
-toml = "^0.10.2"
-tiktoken = "^0.7.0"
-platformdirs = "^4.2.0"
-pydantic = "^2.6.4"
-google-generativeai = "^0.7.1"
-pyperclip = "^1.9.0"
-yaspin = "^3.0.2"
-shortuuid = "^1.0.13"
-litellm = "^1.41.26"
-starlette = ">=0.37.2,<0.42.0"
-html2text = "^2024.2.26"
-selenium = "^4.24.0"
-webdriver-manager = "^4.0.2"
-anthropic = "^0.37.1"
-pyautogui = "^0.9.54"
-typer = "^0.12.5"
-fastapi = "^0.111.0"
-uvicorn = "^0.30.1"
-screeninfo = "^0.8.1"
-pyte = "^0.8.2"
-pygments = "^2.18.0"
-tabulate = "^0.9.0"
-
-[tool.poetry.extras]
-os = ["opencv-python", "pyautogui", "plyer", "pywinctl", "pytesseract", "sentence-transformers", "ipywidgets", "timm"]
-safe = ["semgrep"]
-local = ["opencv-python", "pytesseract", "torch", "transformers", "einops", "torchvision", "easyocr"]
-server = ["fastapi", "janus", "uvicorn"]
-
-[tool.poetry.group.dev.dependencies]
-black = "^23.10.1"
-isort = "^5.12.0"
-pre-commit = "^3.5.0"
-pytest = "^7.4.0"
-sniffio = "^1.3.0"
-websockets = "^13.1"
-pytest-asyncio = "<0.24.0"
-pdoc = "^15.0.0"
-
-[build-system]
-requires = ["poetry-core>=1.0.0"]
-build-backend = "poetry.core.masonry.api"
-
-[tool.poetry.scripts]
-interpreter = "interpreter.terminal_interface.start_terminal_interface:main"
-wtf = "scripts.wtf:main"
-interpreter-classic = "interpreter.terminal_interface.start_terminal_interface:main"
-i = "interpreter_1.cli:main"
-
-[tool.black]
-target-version = ['py311']
-
-[tool.isort]
-profile = "black"
-multi_line_output = 3
-include_trailing_comma = true
diff --git a/archive/server.txt b/archive/server.txt
deleted file mode 100644
index 68df98b3e9..0000000000
--- a/archive/server.txt
+++ /dev/null
@@ -1,90 +0,0 @@
-# Check if running in server mode
- if "--server" in sys.argv:
- app = FastAPI()
-
- # Start the mouse position checking thread when in server mode
- mouse_thread = threading.Thread(target=check_mouse_position)
- mouse_thread.daemon = True
- mouse_thread.start()
-
- # Get API key from environment variable
- api_key = os.environ.get("ANTHROPIC_API_KEY")
- if not api_key:
- raise ValueError(
- "ANTHROPIC_API_KEY environment variable must be set when running in server mode"
- )
-
- @app.post("/openai/chat/completions")
- async def chat_completion(request: ChatCompletionRequest):
- print("BRAND NEW REQUEST")
- # Check exit flag before processing request
- if exit_flag:
- return {"error": "Server shutting down due to mouse in corner"}
-
- async def stream_response():
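-            # Emit OpenAI-compatible SSE chunks ("data: {...}\n\n") so standard
-            # chat-completions clients can consume the stream.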
- # if "claude" not in request.messages[-1].content.lower():
- # print("not claude")
- # # Return early if not a Claude request
- # return
-
- # Instead of creating converted_messages, append the last message to global messages
- global messages
- messages.append(
- {
- "role": request.messages[-1].role,
- "content": [
- {"type": "text", "text": request.messages[-1].content}
- ],
- }
- )
-
- response_chunks = []
-
- async def output_callback(content_block: BetaContentBlock):
- chunk = f"data: {json.dumps({'choices': [{'delta': {'content': content_block.text}}]})}\n\n"
- response_chunks.append(chunk)
- yield chunk
-
- async def tool_output_callback(result: ToolResult, tool_id: str):
- if result.output or result.error:
- content = result.output if result.output else result.error
- chunk = f"data: {json.dumps({'choices': [{'delta': {'content': content}}]})}\n\n"
- response_chunks.append(chunk)
- yield chunk
-
- try:
- yield f"data: {json.dumps({'choices': [{'delta': {'role': 'assistant'}}]})}\n\n"
-
- messages = [m for m in messages if m["content"]]
- # print(str(messages)[-100:])
- # await asyncio.sleep(4)
-
- async for chunk in sampling_loop(
- model=model,
- provider=provider,
- messages=messages, # Now using global messages
- output_callback=output_callback,
- tool_output_callback=tool_output_callback,
- api_key=api_key,
- ):
- print(chunk)
- if chunk["type"] == "chunk":
- await asyncio.sleep(0)
- yield f"data: {json.dumps({'choices': [{'delta': {'content': chunk['chunk']}}]})}\n\n"
- if chunk["type"] == "messages":
- messages = chunk["messages"]
-
- yield f"data: {json.dumps({'choices': [{'delta': {'content': '', 'finish_reason': 'stop'}}]})}\n\n"
-
- except Exception as e:
- print("Error: An exception occurred.")
- print(traceback.format_exc())
- pass
- # raise
- # print(f"Error: {e}")
- # yield f"data: {json.dumps({'error': str(e)})}\n\n"
-
- return StreamingResponse(stream_response(), media_type="text/event-stream")
-
- # Instead of running uvicorn here, we'll return the app
- return app
\ No newline at end of file
diff --git a/archive/sound.py b/archive/sound.py
deleted file mode 100644
index 41b4adf9d1..0000000000
--- a/archive/sound.py
+++ /dev/null
@@ -1,58 +0,0 @@
-import pygame.mixer
-
-# Initialize pygame mixer with smaller buffer for keyboard-like sounds
-pygame.mixer.init(44100, -16, 1, 512)
-
-
-def generate_typing_sound(duration, base_freq, volume):
- sample_rate = 44100
- num_samples = int(duration * sample_rate / 1000)
-
- # Generate a more complex waveform that sounds like a soft key press
- buffer = bytearray()
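-    # Samples are built by hand as 16-bit signed little-endian mono values,
-    # matching the pygame.mixer.init(44100, -16, 1, 512) configuration above.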
- for i in range(num_samples):
- # Create attack-decay envelope
- progress = i / num_samples
- if progress < 0.1: # Quick attack
- envelope = progress * 10
- else: # Longer decay
- envelope = (1 - progress) ** 0.5
-
- # Combine multiple frequencies for richer sound
- value = int(
- 2048
- * envelope
- * volume
- * (
-                0.7 * math.sin(2 * math.pi * base_freq * i / sample_rate)  # Base frequency
-                + 0.2 * math.sin(2 * math.pi * (base_freq * 1.5) * i / sample_rate)  # Overtone
-                + 0.1 * math.sin(2 * math.pi * (base_freq * 2) * i / sample_rate)  # Higher overtone
- )
- )
-
- buffer.extend(value.to_bytes(2, byteorder="little", signed=True))
-
- return pygame.mixer.Sound(buffer=bytes(buffer))
-
-
-import math
-import random
-import time
-
-# Pre-generate a few variations of typing sounds
-typing_sounds = []
-for _ in range(30):
- duration = random.randint(30, 50) # Shorter duration for crisp typing sound
- base_freq = random.randint(100, 8000) # Higher frequencies for key-like sound
- volume = random.uniform(0.3, 0.4) # Lower volume for softer sound
- sound = generate_typing_sound(duration, base_freq, volume)
- typing_sounds.append(sound)
-
-# Play random variations of the typing sounds
-for i in range(100):
- sound = random.choice(typing_sounds)
- sound.play()
- time.sleep(random.uniform(0.01, 0.03)) # More natural typing rhythm
diff --git a/archive/tool copy.py b/archive/tool copy.py
deleted file mode 100644
index 70df5f27ee..0000000000
--- a/archive/tool copy.py
+++ /dev/null
@@ -1,727 +0,0 @@
-import json
-import os
-import random
-import re
-import sys
-
-from pygments import highlight
-from pygments.formatters import Terminal256Formatter
-from pygments.lexers import TextLexer, get_lexer_by_name
-from pygments.styles import get_all_styles
-from yaspin import yaspin
-from yaspin.spinners import Spinners
-
-
-class ContentRenderer:
- def __init__(self, style):
- self.buffer = ""
- self.started = False
- self.style = style
-
- def feed(self, json_obj):
- pass
-
- def flush(self):
- pass
-
-
-class CodeRenderer(ContentRenderer):
- def __init__(self, style):
- super().__init__(style)
- SchemaRenderer.print_separator("┼")
- self.line_number = 1
- self.code_lang = None
- self.buffer = ""
- self.rendered_content = ""
- self.spinner = yaspin(Spinners.simpleDots, text=" ")
- self.is_spinning = False
- self.terminal_width = os.get_terminal_size().columns
- self.prefix_width = 6 # "123 │ " = 6 characters
- self.safety_padding = 4 # Extra padding to prevent edge cases
- self.json_obj = None
-
- def feed(self, json_obj):
- self.json_obj = json_obj
-
- if json_obj.get("name") == "bash":
- content = json_obj.get("command", "")
- self.code_lang = "bash"
- elif json_obj.get("name") == "str_replace_editor":
- content = json_obj.get("file_text", "")
-
- if self.code_lang is None:
- # Derive it from path extension
- extension = (
- json_obj.get("path", "").split(".")[-1]
- if "." in json_obj.get("path", "")
- else ""
- )
- self.code_lang = {
- "py": "python",
- "js": "javascript",
- "ts": "typescript",
- "html": "html",
- "css": "css",
- "json": "json",
- "md": "markdown",
- "sh": "bash",
- "txt": "text",
- }.get(extension, "text")
-
- # Start spinner if we have content to process
- if not self.is_spinning and content.strip():
- self.spinner.start()
- self.is_spinning = True
-
- # Only process the new part of the content
- if len(content) <= len(self.rendered_content):
- return
-
- # Get only the new content
- new_content = content[len(self.rendered_content) :]
- self.buffer += new_content
- self.rendered_content = content # Update what we've seen
-
- # Process complete lines
- if "\n" in self.buffer:
- lines = self.buffer.split("\n")
- for line in lines[:-1]:
- if self.is_spinning:
- self.spinner.stop()
- self.is_spinning = False
- self._render_line(line)
- if lines[-1].strip(): # If there's more content coming
- self.spinner.start()
- self.is_spinning = True
- self.buffer = lines[-1] # Keep the incomplete line
-
- def _render_line(self, line):
- try:
- lexer = get_lexer_by_name(self.code_lang)
- except:
- lexer = TextLexer()
-
- formatter = Terminal256Formatter(style=self.style)
- available_width = self.terminal_width - self.prefix_width - self.safety_padding
-
- # Remove ANSI escape sequences for width calculation
- line_no_ansi = re.sub(r"\033\[[0-9;]*[a-zA-Z]", "", line)
-
- # Split long lines before highlighting, accounting for actual visible width
- if len(line_no_ansi) > available_width:
- chunks = []
- pos = 0
- chunk_start = 0
- ansi_offset = 0
-
- while pos < len(line_no_ansi):
- if pos - chunk_start >= available_width:
- # Find actual position in original string including ANSI codes
- real_pos = pos + ansi_offset
- chunks.append(line[chunk_start:real_pos])
- chunk_start = real_pos
- pos += 1
-
- # Count ANSI sequences to maintain offset
- while pos + ansi_offset < len(line):
- if line[pos + ansi_offset] == "\033":
- match = re.match(
- r"\033\[[0-9;]*[a-zA-Z]", line[pos + ansi_offset :]
- )
- if match:
- ansi_offset += len(match.group(0))
- else:
- break
- else:
- break
-
- if chunk_start < len(line):
- chunks.append(line[chunk_start:])
- else:
- chunks = [line]
-
- # Highlight and print first chunk with line number
- line_prefix = f"{SchemaRenderer.GRAY_COLOR}{str(self.line_number).rjust(3)} │ {SchemaRenderer.RESET_COLOR}"
- # if self.json_obj and self.json_obj.get("command") == "Open Interpreter":
- # line_prefix = f"{SchemaRenderer.GRAY_COLOR} │ {SchemaRenderer.RESET_COLOR}"
- highlighted = highlight(chunks[0] + "\n", lexer, formatter).rstrip()
- sys.stdout.write(f"{line_prefix}{highlighted}\n")
- # sys.stdout.write(f"{line_prefix}" + " ".join(highlighted) + "\n") # For debugging
-
- # Print remaining chunks with padding and pipe
- continuation_prefix = (
- f"{SchemaRenderer.GRAY_COLOR} │ {SchemaRenderer.RESET_COLOR}"
- )
- for chunk in chunks[1:]:
- highlighted = highlight(chunk + "\n", lexer, formatter).rstrip()
- sys.stdout.write(f"{continuation_prefix}{highlighted}\n")
-
- sys.stdout.flush()
- self.line_number += 1
-
- def flush(self):
- if self.is_spinning:
- self.spinner.stop()
- self.is_spinning = False
- if self.buffer:
- self._render_line(self.buffer)
- self.buffer = ""
-
- def close(self):
- self.flush()
- SchemaRenderer.print_separator("┴", newline=False)
-
-
-class PathRenderer(ContentRenderer):
- def __init__(self, style):
- super().__init__(style)
- self.rendered_content = ""
- self.json_obj = None
-
- def feed(self, json_obj):
- self.json_obj = json_obj
-
- if json_obj.get("name") == "computer":
- if "coordinate" in json_obj:
- content = json_obj.get("coordinate", "")
- elif "text" in json_obj:
- content = json_obj.get("text", "")
- else:
- content = json_obj.get("path", "")
-
- content = str(content)
-
- # Only render new content
- new_content = content[len(self.rendered_content) :]
- if new_content:
- sys.stdout.write(f"{new_content}")
- sys.stdout.flush()
- self.rendered_content += new_content
-
- def close(self):
- self.flush()
- if self.json_obj and (
- self.json_obj.get("command") == "view"
- or self.json_obj.get("name") == "computer"
- ):
- SchemaRenderer.print_separator("┴", newline=True)
-
-
-class CommandRenderer(ContentRenderer):
- ICONS = {
- "create": "✦",
- "view": "⚆",
- "str_replace": "↻",
- "insert": "➤",
- "undo_edit": "↫",
- "bash": "▶",
- "key": "⌨",
- "type": "⌨",
- "mouse_move": "⇢",
- "left_click": "⊙",
- "left_click_drag": "⇥",
- "right_click": "⊚",
- "middle_click": "⊗",
- "double_click": "⊛",
- "screenshot": "⚆",
- "cursor_position": "⊹",
- "Open Interpreter": "●",
- }
-
- def __init__(self, style):
- super().__init__(style)
- SchemaRenderer.print_separator("┬")
- self.buffer = ""
- self.rendered_commands = set() # Track complete commands we've rendered
- self.json_obj = None
-
- def feed(self, json_obj):
- self.json_obj = json_obj
- if json_obj.get("name") == "bash":
- content = json_obj.get("name", "")
- elif json_obj.get("name") == "str_replace_editor":
- content = json_obj.get("command", "")
- elif json_obj.get("name") == "computer":
- content = json_obj.get("action", "")
-
- # If we've already rendered this complete command, skip
- if content in self.rendered_commands:
- return
-
- # Buffer the content
- self.buffer = content
-
- # If this is a complete command (matches one of our icons), render it
- if content.strip() in self.ICONS:
- icon = self.ICONS.get(content.strip(), "•")
- ICON_COLOR = "\033[37m" # White color
- sys.stdout.write(
- f"{SchemaRenderer.GRAY_COLOR} {ICON_COLOR}{icon}\033[0m{SchemaRenderer.GRAY_COLOR} │ {content}{SchemaRenderer.RESET_COLOR} "
- )
- sys.stdout.flush()
- self.rendered_commands.add(content)
- self.buffer = ""
-
- def flush(self):
- pass # No need to flush since we render when we get a complete command
-
- def close(self):
- if self.json_obj and self.json_obj.get("action") == "screenshot":
- SchemaRenderer.print_separator("┴", newline=True)
-
-
-class InsertRenderer(ContentRenderer):
- def __init__(self, style):
- super().__init__(style)
- self.insert_line = None
- self.context_lines = 3
- self.file_content = []
- self.showed_context = False
- self.GREEN_COLOR = "\033[38;5;255m"
- self.RESET_COLOR = "\033[0m"
- self.context_style = "bw"
- self.showed_after_context = False
- self.line_number = 1
- self.rendered_content = ""
- self.is_spinning = False
- self.spinner = yaspin(Spinners.simpleDots, text=" ")
- self.code_lang = "python"
- self.buffer = ""
- self.terminal_width = os.get_terminal_size().columns
-        self.prefix_width = 5  # width reserved for the "123 │ " line prefix
- self.safety_padding = 2 # Extra padding to prevent edge cases
- self.show_context = True
- self.leading_space = ""
-
- def _load_file_content(self, path):
- """Load file content and return as list of lines"""
- if os.path.exists(path):
- with open(path, "r") as f:
- return f.readlines()
- return []
-
- def _find_insert_line(self, path, specified_line=None, old_str=None):
- """Find the insertion line either from specified line or by finding old_str"""
- if specified_line is not None:
- return specified_line
-
- if old_str is not None:
- file_text = "".join(self.file_content)
- if old_str not in file_text:
- raise ValueError(f"Could not find '{old_str}' in {path}")
- # Find line number by counting newlines before match
- prefix = file_text[: file_text.index(old_str)]
- line_number = prefix.count("\n") + 1
-            # Capture the indentation that precedes the match on its own line.
-            self.leading_space = prefix.split("\n")[-1]
- return line_number
-
- return 1 # Default to first line if neither specified
-
- def feed(self, json_obj):
- path = json_obj.get("path", "")
- content = json_obj.get("new_str", "")
-
- # Initialize context if needed
- if not self.showed_context:
- # Load file content if not already loaded
- if not self.file_content:
- self.file_content = self._load_file_content(path)
-
- # Find insert line position
- self.insert_line = self._find_insert_line(
- path,
- specified_line=json_obj.get("insert_line"),
- old_str=json_obj.get("old_str"),
- )
-
- # Print separator unless we're doing a string replacement
- if "old_str" not in json_obj:
- SchemaRenderer.print_separator("┼")
-
- # Set initial line number and show context
- self.line_number = self.insert_line
-
- if (
- self.show_context and "old_str" not in json_obj
- ): # OldStr would have already shown context
- start_line = max(0, self.insert_line - self.context_lines - 1)
- end_line = min(len(self.file_content), self.insert_line - 1)
- for line in self.file_content[start_line:end_line]:
- self._render_line(line.rstrip(), is_context=True)
-
- self.showed_context = True
-
- # Process the new content
- if len(content) <= len(self.rendered_content):
- return
-
- # Get only the new content
- new_content = content[len(self.rendered_content) :]
- self.buffer += new_content
- self.rendered_content = content
-
- # Process complete lines
- if "\n" in self.buffer:
- lines = self.buffer.split("\n")
- # Render complete lines
- for line in lines[:-1]:
- if self.is_spinning:
- self.spinner.stop()
- self.is_spinning = False
- self._render_line(line, is_context=False)
- if lines[-1].strip():
- self.spinner.start()
- self.is_spinning = True
- self.buffer = lines[-1]
-
- def _render_line(self, line, is_context=False):
- try:
- lexer = get_lexer_by_name(self.code_lang)
- except:
- lexer = TextLexer()
-
- available_width = self.terminal_width - self.prefix_width - self.safety_padding
-
- # Split long lines before highlighting/formatting
- if len(line) > available_width:
- chunks = [
- line[i : i + available_width]
- for i in range(0, len(line), available_width)
- ]
- else:
- chunks = [line]
-
- # Prepare first line prefix
- if is_context:
- line_number_color = SchemaRenderer.GRAY_COLOR
- else:
- line_number_color = self.GREEN_COLOR
- line_prefix = f"{line_number_color}{str(self.line_number).rjust(3)} │ {SchemaRenderer.RESET_COLOR}"
-
- # Format and print first chunk
- if is_context:
- highlighted = (
- f"{SchemaRenderer.GRAY_COLOR}{chunks[0]}{SchemaRenderer.RESET_COLOR}"
- )
- else:
- formatter = Terminal256Formatter(style=self.style)
- highlighted = highlight(chunks[0] + "\n", lexer, formatter).rstrip()
- sys.stdout.write(f"{line_prefix}{highlighted}\n")
-
- # Print remaining chunks with padding and pipe
- continuation_prefix = f"{line_number_color} │ {SchemaRenderer.RESET_COLOR}"
- for chunk in chunks[1:]:
- if is_context:
- highlighted = (
- f"{SchemaRenderer.GRAY_COLOR}{chunk}{SchemaRenderer.RESET_COLOR}"
- )
- else:
- highlighted = highlight(chunk + "\n", lexer, formatter).rstrip()
- sys.stdout.write(f"{continuation_prefix}{highlighted}\n")
-
- sys.stdout.flush()
- self.line_number += 1
-
- def flush(self):
- if self.is_spinning:
- self.spinner.stop()
- self.is_spinning = False
- if self.buffer:
- self._render_line(self.buffer)
- self.buffer = ""
-
- # Show ending context if we haven't already
- if (
- self.show_context
- and not self.showed_after_context
- and self.insert_line is not None
- ):
- self.showed_after_context = True
- start_line = self.insert_line - 1
- end_line = min(len(self.file_content), start_line + self.context_lines)
- for line in self.file_content[start_line:end_line]:
- self._render_line(line.rstrip(), is_context=True)
-
- def close(self):
- self.flush()
- SchemaRenderer.print_separator("┴", newline=False)
-
-
-class OldStrRenderer(ContentRenderer):
- def __init__(self, style):
- super().__init__(style)
- SchemaRenderer.print_separator("┼")
- self.RED_COLOR = "\033[39m\033[38;5;204m" # Monokai red
- self.RESET_COLOR = "\033[0m"
- self.rendered_content = ""
- self.line_number = 1
- self.code_lang = "python"
- self.terminal_width = os.get_terminal_size().columns
- self.prefix_width = 6
- self.safety_padding = 4
- self.buffer = "" # Add buffer for line-by-line processing
- self.found_line_number = None
- self.path = None
- self.leading_space = ""
-
- def _find_line_number(self, content, path):
- """Find the line number of content in file and print context"""
- try:
- with open(path, "r") as f:
- file_content = f.read()
- occurrences = file_content.count(content)
- if occurrences == 1:
- # Find line number by counting newlines
- line_idx = file_content.find(content)
- self.found_line_number = file_content[:line_idx].count("\n") + 1
-
- # Print context lines before
- context_lines = 3
- lines_before = file_content[:line_idx].split("\n")[-context_lines:]
- start_line = self.found_line_number - len(lines_before)
- for i, line in enumerate(lines_before):
- line_num = start_line + i
- prefix = f"{SchemaRenderer.GRAY_COLOR}{str(line_num).rjust(3)} │ {SchemaRenderer.RESET_COLOR}"
- sys.stdout.write(
- f"{prefix}{SchemaRenderer.GRAY_COLOR}{line}{SchemaRenderer.RESET_COLOR}\n"
- )
- self.line_number = self.found_line_number
-                # Capture the indentation that precedes the match on its own line.
-                self.leading_space = file_content[:line_idx].split("\n")[-1]
- except:
- self.found_line_number = 1
-
- def feed(self, json_obj):
- content = json_obj.get("old_str", "")
- self.path = json_obj.get("path", "")
-
- if len(content) <= len(self.rendered_content):
- return
-
- # Get only the new content
- new_content = content[len(self.rendered_content) :]
- self.buffer += new_content
- self.rendered_content = content
-
- # If this is our first content, find the line number
- if self.found_line_number is None:
- self._find_line_number(content, self.path)
-
- # Process complete lines
- if "\n" in self.buffer and self.found_line_number is not None:
- lines = self.buffer.split("\n")
- # Process all complete lines
- for line in lines[:-1]:
- self._render_line(line)
- # Keep the incomplete line in the buffer
- self.buffer = lines[-1]
-
- def _render_line(self, line):
- try:
- lexer = get_lexer_by_name(self.code_lang)
- except:
- lexer = TextLexer()
-
- available_width = self.terminal_width - self.prefix_width - self.safety_padding
-
- # Split long lines before highlighting
- if len(line) > available_width:
- chunks = [
- line[i : i + available_width]
- for i in range(0, len(line), available_width)
- ]
- else:
- chunks = [line]
-
- # Render first chunk with line number
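-        # \033[9m / \033[29m turn strikethrough on and off so the old text renders crossed out.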
- line_prefix = f"{SchemaRenderer.GRAY_COLOR}{str(self.line_number).rjust(3)} │ {SchemaRenderer.RESET_COLOR}"
- sys.stdout.write(
- f"{line_prefix}{self.RED_COLOR}\033[9m{chunks[0]}\033[29m{self.RESET_COLOR}\n"
- )
-
- # Render remaining chunks with continuation prefix
- continuation_prefix = (
- f"{SchemaRenderer.GRAY_COLOR} │ {SchemaRenderer.RESET_COLOR}"
- )
- for chunk in chunks[1:]:
- sys.stdout.write(
- f"{continuation_prefix}{self.RED_COLOR}\033[9m{chunk}\033[29m{self.RESET_COLOR}\n"
- )
-
- sys.stdout.flush()
- self.line_number += 1
-
- def flush(self):
- if self.buffer and self.found_line_number is not None:
- self._render_line(self.buffer)
- self.buffer = ""
-
- def close(self):
- # Try to find line number one last time if we haven't found it yet
- if self.found_line_number is None and self.rendered_content and self.path:
- self._find_line_number(self.rendered_content, self.path)
-
- self.flush()
- if self.found_line_number is None:
- print("No line number found")
-
-
-class SchemaRenderer:
- GRAY_COLOR = "\033[38;5;240m"
- RESET_COLOR = "\033[0m"
-
- @staticmethod
- def print_separator(char="─", newline=True, line=True):
- terminal_width = os.get_terminal_size().columns
- if newline:
- sys.stdout.write("\n")
- if line:
- sys.stdout.write(
- f"{SchemaRenderer.GRAY_COLOR}────{char}"
- + "─" * (terminal_width - 5)
- + f"{SchemaRenderer.RESET_COLOR}\n"
- )
- else:
- sys.stdout.write(
- f"{SchemaRenderer.GRAY_COLOR} {char}{SchemaRenderer.RESET_COLOR}\n"
- )
-
- edit_schemas = {
- "command": {"renderer": CommandRenderer},
- "path": {"renderer": PathRenderer},
- "file_text": {"renderer": CodeRenderer},
- "old_str": {"renderer": OldStrRenderer},
- "new_str": {"renderer": InsertRenderer},
- }
-
- bash_schemas = {
- "name": {"renderer": CommandRenderer},
- "command": {"renderer": CodeRenderer},
- }
-
- computer_schemas = {
- "action": {"renderer": CommandRenderer},
- "text": {"renderer": PathRenderer},
- "coordinate": {"renderer": PathRenderer},
- }
-
-
-class ToolRenderer:
- def __init__(self, name=None):
- self.current_renderers = {}
- self.partial_json = ""
- self.code_style = random.choice(list(get_all_styles()))
- self.code_style = "monokai" # bw
- # print("Style:", self.code_style)
- self.current_schema = None
- self.current_json = None # Store the current parsed JSON state
- self.name = name
-
- def _parse_json(self, json_chunk):
- # Add new chunk to existing buffer
- self.partial_json += json_chunk
-
- # Try to parse the complete buffer first
- try:
- result = json.loads(self.partial_json)
- self.current_json = result # Store the current state
- # Only clear buffer if we successfully parsed the entire thing
- if result.get("end", False):
- self.partial_json = ""
- return result
- except:
- pass
-
- # Rest of the method remains the same for handling incomplete JSON
- new_s = ""
- stack = []
- is_inside_string = False
- escaped = False
-
- # Process each character in the string one at a time.
- for char in self.partial_json:
- if is_inside_string:
- if char == '"' and not escaped:
- is_inside_string = False
- elif char == "\n" and not escaped:
- char = (
- "\\n" # Replace the newline character with the escape sequence.
- )
- elif char == "\\":
- escaped = not escaped
- else:
- escaped = False
- else:
- if char == '"':
- is_inside_string = True
- escaped = False
- elif char == "{":
- stack.append("}")
- elif char == "[":
- stack.append("]")
- elif char == "}" or char == "]":
- if stack and stack[-1] == char:
- stack.pop()
- else:
- # Mismatched closing character; the input is malformed.
- return None
-
- # Append the processed character to the new string.
- new_s += char
-
- # If we're still inside a string at the end of processing, we need to close the string.
- if is_inside_string:
- new_s += '"'
-
- # Close any remaining open structures in the reverse order that they were opened.
- for closing_char in reversed(stack):
- new_s += closing_char
-
- # Attempt to parse the modified string as JSON.
- try:
- result = json.loads(new_s)
- self.current_json = result # Store the current state
- # Only clear buffer if we successfully parsed a complete message
- if result.get("end", False):
- self.partial_json = ""
- return result
- except:
- # Don't print the failure message since it's expected for incomplete JSON
- return None
-
- def feed(self, chunk):
- json_obj = self._parse_json(chunk)
- if not json_obj:
- return
-
- json_obj["name"] = self.name # Pass name into renderers
-
- # Process the JSON object
- schemas = []
- if self.name == "str_replace_editor":
- schemas = SchemaRenderer.edit_schemas.items()
- elif self.name == "bash":
- schemas = SchemaRenderer.bash_schemas.items()
- elif self.name == "computer":
- schemas = SchemaRenderer.computer_schemas.items()
-
- for schema_type, schema in schemas:
- if schema_type in json_obj:
- # If this is a new schema type, initialize it
- if schema_type not in self.current_renderers:
- # Close any existing renderers
- self.close()
- # Initialize the new renderer
- self.current_renderers[schema_type] = schema["renderer"](
- self.code_style
- )
-
- # Feed the entire JSON object to the renderer
- self.current_renderers[schema_type].feed(json_obj)
-
- def close(self):
- # Close any remaining content
- for renderer in self.current_renderers.values():
- if hasattr(renderer, "close"):
- renderer.close()
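The archived renderer above is driven as a stream: `ToolRenderer.feed()` accepts raw JSON fragments of a tool call, repairs the partial JSON, and routes recognized fields (`command`, `path`, `file_text`, ...) to the per-field renderers. A minimal usage sketch, assuming a `bash` tool call arriving in arbitrary chunk boundaries (the chunks and the command below are illustrative, not part of the archived file):

```python
# Sketch only: drive the archived ToolRenderer with streamed JSON fragments.
renderer = ToolRenderer(name="bash")

# The tool-call arguments arrive as partial JSON; feed() re-parses on every
# chunk and renders each field incrementally as soon as it becomes parseable.
for chunk in ['{"name": "ba', 'sh", "command": "ls ', '-la"}']:
    renderer.feed(chunk)

# Flush any buffered line and print the closing separator.
renderer.close()
```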
diff --git a/archive/tool line_numbers.py b/archive/tool line_numbers.py
deleted file mode 100644
index 0f9a062760..0000000000
--- a/archive/tool line_numbers.py
+++ /dev/null
@@ -1,752 +0,0 @@
-import json
-import os
-import random
-import re
-import sys
-
-from pygments import highlight
-from pygments.formatters import Terminal256Formatter
-from pygments.lexers import TextLexer, get_lexer_by_name
-from pygments.styles import get_all_styles
-from yaspin import yaspin
-from yaspin.spinners import Spinners
-
-
-class ContentRenderer:
- def __init__(self, style):
- self.buffer = ""
- self.started = False
- self.style = style
-
- def feed(self, json_obj):
- pass
-
- def flush(self):
- pass
-
-
-class CodeRenderer(ContentRenderer):
- def __init__(self, style):
- super().__init__(style)
- SchemaRenderer.print_separator("┼")
- self.line_number = 1
- self.code_lang = None
- self.buffer = ""
- self.rendered_content = ""
- self.spinner = yaspin(Spinners.simpleDots, text=" ")
- self.is_spinning = False
- self.terminal_width = os.get_terminal_size().columns
- self.prefix_width = 6 # "123 │ " = 6 characters
- self.safety_padding = 4 # Extra padding to prevent edge cases
- self.json_obj = None
-
- def feed(self, json_obj):
- self.json_obj = json_obj
-
- if json_obj.get("name") == "bash":
- content = json_obj.get("command", "")
- self.code_lang = "bash"
- elif json_obj.get("name") == "str_replace_editor":
- content = json_obj.get("file_text", "")
-
- if self.code_lang is None:
- # Derive it from path extension
- extension = (
- json_obj.get("path", "").split(".")[-1]
- if "." in json_obj.get("path", "")
- else ""
- )
- self.code_lang = {
- "py": "python",
- "js": "javascript",
- "ts": "typescript",
- "html": "html",
- "css": "css",
- "json": "json",
- "md": "markdown",
- "sh": "bash",
- "txt": "text",
- }.get(extension, "text")
-
- # Start spinner if we have content to process
- if not self.is_spinning and content.strip():
- self.spinner.start()
- self.is_spinning = True
-
- # Only process the new part of the content
- if len(content) <= len(self.rendered_content):
- return
-
- # Get only the new content
- new_content = content[len(self.rendered_content) :]
- self.buffer += new_content
- self.rendered_content = content # Update what we've seen
-
- # Process complete lines
- if "\n" in self.buffer:
- lines = self.buffer.split("\n")
- for line in lines[:-1]:
- if self.is_spinning:
- self.spinner.stop()
- self.is_spinning = False
- self._render_line(line)
- if lines[-1].strip(): # If there's more content coming
- self.spinner.start()
- self.is_spinning = True
- self.buffer = lines[-1] # Keep the incomplete line
-
- def _render_line(self, line):
- line = line.encode("utf-8", errors="replace").decode("utf-8")
- try:
- lexer = get_lexer_by_name(self.code_lang)
- except:
- lexer = TextLexer()
-
- formatter = Terminal256Formatter(style=self.style)
- available_width = self.terminal_width - self.prefix_width - self.safety_padding
-
- # Remove ANSI escape sequences for width calculation
- line_no_ansi = re.sub(r"\033\[[0-9;]*[a-zA-Z]", "", line)
-
- # Split long lines before highlighting, accounting for actual visible width
- if len(line_no_ansi) > available_width:
- chunks = []
- pos = 0
- chunk_start = 0
- ansi_offset = 0
-
- while pos < len(line_no_ansi):
- if pos - chunk_start >= available_width:
- # Find actual position in original string including ANSI codes
- real_pos = pos + ansi_offset
- chunks.append(line[chunk_start:real_pos])
- chunk_start = real_pos
- pos += 1
-
- # Count ANSI sequences to maintain offset
- while pos + ansi_offset < len(line):
- if line[pos + ansi_offset] == "\033":
- match = re.match(
- r"\033\[[0-9;]*[a-zA-Z]", line[pos + ansi_offset :]
- )
- if match:
- ansi_offset += len(match.group(0))
- else:
- break
- else:
- break
-
- if chunk_start < len(line):
- chunks.append(line[chunk_start:])
- else:
- chunks = [line]
-
- # Highlight and print first chunk with line number
- line_prefix = f"{SchemaRenderer.GRAY_COLOR}{str(self.line_number).rjust(3)} │ {SchemaRenderer.RESET_COLOR}"
- # if self.json_obj and self.json_obj.get("command") == "Open Interpreter":
- # line_prefix = f"{SchemaRenderer.GRAY_COLOR} │ {SchemaRenderer.RESET_COLOR}"
- highlighted = highlight(chunks[0] + "\n", lexer, formatter).rstrip()
-
- if self.line_number == 0 and highlighted.strip() == "":
- return
-
- sys.stdout.write(f"{line_prefix}{highlighted}\n")
- # sys.stdout.write(f"{line_prefix}" + " ".join(highlighted) + "\n") # For debugging
-
- # Print remaining chunks with padding and pipe
- continuation_prefix = (
- f"{SchemaRenderer.GRAY_COLOR} │ {SchemaRenderer.RESET_COLOR}"
- )
- for chunk in chunks[1:]:
- highlighted = highlight(chunk + "\n", lexer, formatter).rstrip()
- sys.stdout.write(f"{continuation_prefix}{highlighted}\n")
-
- sys.stdout.flush()
- self.line_number += 1
-
- def flush(self):
- if self.is_spinning:
- self.spinner.stop()
- self.is_spinning = False
- if self.buffer:
- self._render_line(self.buffer)
- self.buffer = ""
-
- def close(self):
- self.flush()
- SchemaRenderer.print_separator("┴", newline=False)
-
-
-class PathRenderer(ContentRenderer):
- def __init__(self, style):
- super().__init__(style)
- self.cwd = os.getcwd() + "/"
- self.buffer = ""
- self.json_obj = None
- self.last_printed_pos = 0
- self.diverged = False
-
- def feed(self, json_obj):
- self.json_obj = json_obj
-
- if json_obj.get("name") == "computer":
- if "coordinate" in json_obj:
- content = json_obj.get("coordinate", "")
- elif "text" in json_obj:
- content = json_obj.get("text", "")
- else:
- content = json_obj.get("path", "")
-
- content = str(content)
-
- # Process each new character
- while self.last_printed_pos < len(content):
- curr_char = content[self.last_printed_pos]
-
- # If we haven't diverged yet, check if we're still matching cwd
- if not self.diverged:
- if (
- self.last_printed_pos < len(self.cwd)
- and curr_char != self.cwd[self.last_printed_pos]
- ):
- # We just diverged - print everything from start
- self.diverged = True
- sys.stdout.write(content[: self.last_printed_pos + 1])
- elif self.last_printed_pos >= len(self.cwd):
- # We're past cwd - print just this character
- sys.stdout.write(curr_char)
- else:
- # Already diverged - print each new character
- sys.stdout.write(curr_char)
-
- sys.stdout.flush()
- self.last_printed_pos += 1
-
- def close(self):
- self.flush()
- if self.json_obj and (
- self.json_obj.get("command") == "view"
- or self.json_obj.get("name") == "computer"
- ):
- SchemaRenderer.print_separator("┴", newline=True)
-
-
-class CommandRenderer(ContentRenderer):
- ICONS = {
- "create": "✦",
- "view": "⚆",
- "str_replace": "↻",
- "insert": "➤",
- "undo_edit": "↫",
- "bash": "▶",
- "key": "⌨",
- "type": "⌨",
- "mouse_move": "⇢",
- "left_click": "⊙",
- "left_click_drag": "⇥",
- "right_click": "⊚",
- "middle_click": "⊗",
- "double_click": "⊛",
- "screenshot": "⚆",
- "cursor_position": "⊹",
- "Open Interpreter": "●",
- }
-
- def __init__(self, style):
- super().__init__(style)
- SchemaRenderer.print_separator("┬")
- self.buffer = ""
- self.rendered_commands = set() # Track complete commands we've rendered
- self.json_obj = None
-
- def feed(self, json_obj):
- self.json_obj = json_obj
- if json_obj.get("name") == "bash":
- content = json_obj.get("name", "")
- elif json_obj.get("name") == "str_replace_editor":
- content = json_obj.get("command", "")
- elif json_obj.get("name") == "computer":
- content = json_obj.get("action", "")
-
- # If we've already rendered this complete command, skip
- if content in self.rendered_commands:
- return
-
- # Buffer the content
- self.buffer = content
-
- # If this is a complete command (matches one of our icons), render it
- if content.strip() in self.ICONS:
- icon = self.ICONS.get(content.strip(), "•")
- ICON_COLOR = "\033[37m" # White color
- sys.stdout.write(
- f"{SchemaRenderer.GRAY_COLOR} {ICON_COLOR}{icon}\033[0m{SchemaRenderer.GRAY_COLOR} │ {content}{SchemaRenderer.RESET_COLOR} "
- )
- sys.stdout.flush()
- self.rendered_commands.add(content)
- self.buffer = ""
-
- def flush(self):
- pass # No need to flush since we render when we get a complete command
-
- def close(self):
- if self.json_obj and self.json_obj.get("action") == "screenshot":
- SchemaRenderer.print_separator("┴", newline=True)
-
-
-class InsertRenderer(ContentRenderer):
- def __init__(self, style):
- super().__init__(style)
- self.insert_line = None
- self.context_lines = 3
- self.file_content = []
- self.showed_context = False
- self.GREEN_COLOR = "\033[38;5;255m"
- self.RESET_COLOR = "\033[0m"
- self.context_style = "bw"
- self.showed_after_context = False
- self.line_number = 1
- self.rendered_content = ""
- self.is_spinning = False
- self.spinner = yaspin(Spinners.simpleDots, text=" ")
- self.code_lang = "python"
- self.buffer = ""
- self.terminal_width = os.get_terminal_size().columns
- self.prefix_width = 5 # "123 │ " = 6 characters
- self.safety_padding = 2 # Extra padding to prevent edge cases
- self.show_context = True
- self.leading_space = ""
-
- def _load_file_content(self, path):
- """Load file content and return as list of lines"""
- if os.path.exists(path):
- with open(path, "r") as f:
- return f.readlines()
- return []
-
- def _find_insert_line(self, path, specified_line=None, old_str=None):
- """Find the insertion line either from specified line or by finding old_str"""
- if specified_line is not None:
- return specified_line
-
- if old_str is not None:
- file_text = "".join(self.file_content)
- if old_str not in file_text:
- # raise ValueError(f"Could not find '{old_str}' in {path}")
- pass
- # Find line number by counting newlines before match
- prefix = file_text[: file_text.index(old_str)]
- line_number = prefix.count("\n") + 1
- self.leading_space = prefix[: prefix.find(old_str.lstrip())]
- return line_number
-
- return 1 # Default to first line if neither specified
-
- def feed(self, json_obj):
- path = json_obj.get("path", "")
- content = json_obj.get("new_str", "")
-
- # Initialize context if needed
- if not self.showed_context:
- # Load file content if not already loaded
- if not self.file_content:
- self.file_content = self._load_file_content(path)
-
- # Find insert line position
- self.insert_line = self._find_insert_line(
- path,
- specified_line=json_obj.get("insert_line"),
- old_str=json_obj.get("old_str"),
- )
-
- # Print separator unless we're doing a string replacement
- if "old_str" not in json_obj:
- SchemaRenderer.print_separator("┼")
-
- # Set initial line number and show context
- self.line_number = self.insert_line
-
- if (
- self.show_context and "old_str" not in json_obj
- ): # OldStr would have already shown context
- start_line = max(0, self.insert_line - self.context_lines - 1)
- end_line = min(len(self.file_content), self.insert_line - 1)
- for line in self.file_content[start_line:end_line]:
- self._render_line(line.rstrip(), is_context=True)
-
- self.showed_context = True
-
- # Process the new content
- if len(content) <= len(self.rendered_content):
- return
-
- # Get only the new content
- new_content = content[len(self.rendered_content) :]
- self.buffer += new_content
- self.rendered_content = content
-
- # Process complete lines
- if "\n" in self.buffer:
- lines = self.buffer.split("\n")
- # Render complete lines
- for line in lines[:-1]:
- if self.is_spinning:
- self.spinner.stop()
- self.is_spinning = False
- self._render_line(line, is_context=False)
- if lines[-1].strip():
- self.spinner.start()
- self.is_spinning = True
- self.buffer = lines[-1]
-
- def _render_line(self, line, is_context=False):
- try:
- lexer = get_lexer_by_name(self.code_lang)
- except:
- lexer = TextLexer()
-
- available_width = self.terminal_width - self.prefix_width - self.safety_padding
-
- # Split long lines before highlighting/formatting
- if len(line) > available_width:
- chunks = [
- line[i : i + available_width]
- for i in range(0, len(line), available_width)
- ]
- else:
- chunks = [line]
-
- # Prepare first line prefix
- if is_context:
- line_number_color = SchemaRenderer.GRAY_COLOR
- else:
- line_number_color = self.GREEN_COLOR
- line_prefix = f"{line_number_color}{str(self.line_number).rjust(3)} │ {SchemaRenderer.RESET_COLOR}"
-
- # Format and print first chunk
- if is_context:
- highlighted = (
- f"{SchemaRenderer.GRAY_COLOR}{chunks[0]}{SchemaRenderer.RESET_COLOR}"
- )
- else:
- formatter = Terminal256Formatter(style=self.style)
- highlighted = highlight(chunks[0] + "\n", lexer, formatter).rstrip()
- sys.stdout.write(f"{line_prefix}{highlighted}\n")
-
- # Print remaining chunks with padding and pipe
- continuation_prefix = f"{line_number_color} │ {SchemaRenderer.RESET_COLOR}"
- for chunk in chunks[1:]:
- if is_context:
- highlighted = (
- f"{SchemaRenderer.GRAY_COLOR}{chunk}{SchemaRenderer.RESET_COLOR}"
- )
- else:
- highlighted = highlight(chunk + "\n", lexer, formatter).rstrip()
- sys.stdout.write(f"{continuation_prefix}{highlighted}\n")
-
- sys.stdout.flush()
- self.line_number += 1
-
- def flush(self):
- if self.is_spinning:
- self.spinner.stop()
- self.is_spinning = False
- if self.buffer:
- self._render_line(self.buffer)
- self.buffer = ""
-
- # Show ending context if we haven't already
- if (
- self.show_context
- and not self.showed_after_context
- and self.insert_line is not None
- ):
- self.showed_after_context = True
- start_line = self.insert_line - 1
- end_line = min(len(self.file_content), start_line + self.context_lines)
- for line in self.file_content[start_line:end_line]:
- self._render_line(line.rstrip(), is_context=True)
-
- def close(self):
- self.flush()
- SchemaRenderer.print_separator("┴", newline=False)
-
-
-class OldStrRenderer(ContentRenderer):
- def __init__(self, style):
- super().__init__(style)
- SchemaRenderer.print_separator("┼")
- self.RED_COLOR = "\033[39m\033[38;5;204m" # Monokai red
- self.RESET_COLOR = "\033[0m"
- self.rendered_content = ""
- self.line_number = 1
- self.code_lang = "python"
- self.terminal_width = os.get_terminal_size().columns
- self.prefix_width = 6
- self.safety_padding = 4
- self.buffer = "" # Add buffer for line-by-line processing
- self.found_line_number = None
- self.path = None
- self.leading_space = ""
-
- def _find_line_number(self, content, path):
- """Find the line number of content in file and print context"""
- try:
- with open(path, "r") as f:
- file_content = f.read()
- occurrences = file_content.count(content)
- if occurrences == 1:
- # Find line number by counting newlines
- line_idx = file_content.find(content)
- self.found_line_number = file_content[:line_idx].count("\n") + 1
-
- # Print context lines before
- context_lines = 3
- lines_before = file_content[:line_idx].split("\n")[-context_lines:]
- start_line = self.found_line_number - len(lines_before)
- for i, line in enumerate(lines_before):
- line_num = start_line + i
- prefix = f"{SchemaRenderer.GRAY_COLOR}{str(line_num).rjust(3)} │ {SchemaRenderer.RESET_COLOR}"
- sys.stdout.write(
- f"{prefix}{SchemaRenderer.GRAY_COLOR}{line}{SchemaRenderer.RESET_COLOR}\n"
- )
- self.line_number = self.found_line_number
- self.leading_space = file_content[:line_idx][
- : line_idx.find(content.lstrip())
- ]
- except:
- self.found_line_number = 1
-
- def feed(self, json_obj):
- content = json_obj.get("old_str", "")
- self.path = json_obj.get("path", "")
-
- if len(content) <= len(self.rendered_content):
- return
-
- # Get only the new content
- new_content = content[len(self.rendered_content) :]
- self.buffer += new_content
- self.rendered_content = content
-
- # If this is our first content, find the line number
- if self.found_line_number is None:
- self._find_line_number(content, self.path)
-
- # Process complete lines
- if "\n" in self.buffer and self.found_line_number is not None:
- lines = self.buffer.split("\n")
- # Process all complete lines
- for line in lines[:-1]:
- self._render_line(line)
- # Keep the incomplete line in the buffer
- self.buffer = lines[-1]
-
- def _render_line(self, line):
- try:
- lexer = get_lexer_by_name(self.code_lang)
- except:
- lexer = TextLexer()
-
- available_width = self.terminal_width - self.prefix_width - self.safety_padding
-
- # Split long lines before highlighting
- if len(line) > available_width:
- chunks = [
- line[i : i + available_width]
- for i in range(0, len(line), available_width)
- ]
- else:
- chunks = [line]
-
- # Render first chunk with line number
- line_prefix = f"{SchemaRenderer.GRAY_COLOR}{str(self.line_number).rjust(3)} │ {SchemaRenderer.RESET_COLOR}"
- sys.stdout.write(
- f"{line_prefix}{self.RED_COLOR}\033[9m{chunks[0]}\033[29m{self.RESET_COLOR}\n"
- )
-
- # Render remaining chunks with continuation prefix
- continuation_prefix = (
- f"{SchemaRenderer.GRAY_COLOR} │ {SchemaRenderer.RESET_COLOR}"
- )
- for chunk in chunks[1:]:
- sys.stdout.write(
- f"{continuation_prefix}{self.RED_COLOR}\033[9m{chunk}\033[29m{self.RESET_COLOR}\n"
- )
-
- sys.stdout.flush()
- self.line_number += 1
-
- def flush(self):
- if self.buffer and self.found_line_number is not None:
- self._render_line(self.buffer)
- self.buffer = ""
-
- def close(self):
- # Try to find line number one last time if we haven't found it yet
- if self.found_line_number is None and self.rendered_content and self.path:
- self._find_line_number(self.rendered_content, self.path)
-
- self.flush()
- if self.found_line_number is None:
- print("No line number found")
-
-
-class SchemaRenderer:
- GRAY_COLOR = "\033[38;5;240m"
- RESET_COLOR = "\033[0m"
-
- @staticmethod
- def print_separator(char="─", newline=True, line=True):
- terminal_width = os.get_terminal_size().columns
- if newline:
- sys.stdout.write("\n")
- if line:
- sys.stdout.write(
- f"{SchemaRenderer.GRAY_COLOR}────{char}"
- + "─" * (terminal_width - 5)
- + f"{SchemaRenderer.RESET_COLOR}\n"
- )
- else:
- sys.stdout.write(
- f"{SchemaRenderer.GRAY_COLOR} {char}{SchemaRenderer.RESET_COLOR}\n"
- )
-
- edit_schemas = {
- "command": {"renderer": CommandRenderer},
- "path": {"renderer": PathRenderer},
- "file_text": {"renderer": CodeRenderer},
- "old_str": {"renderer": OldStrRenderer},
- "new_str": {"renderer": InsertRenderer},
- }
-
- bash_schemas = {
- "name": {"renderer": CommandRenderer},
- "command": {"renderer": CodeRenderer},
- }
-
- computer_schemas = {
- "action": {"renderer": CommandRenderer},
- "text": {"renderer": PathRenderer},
- "coordinate": {"renderer": PathRenderer},
- }
-
-
-class ToolRenderer:
- def __init__(self, name=None):
- self.current_renderers = {}
- self.partial_json = ""
- self.code_style = random.choice(list(get_all_styles()))
- self.code_style = "monokai" # bw
- # print("Style:", self.code_style)
- self.current_schema = None
- self.current_json = None # Store the current parsed JSON state
- self.name = name
-
- def _parse_json(self, json_chunk):
- # Add new chunk to existing buffer
- self.partial_json += json_chunk
-
- # Try to parse the complete buffer first
- try:
- result = json.loads(self.partial_json)
- self.current_json = result # Store the current state
- # Only clear buffer if we successfully parsed the entire thing
- if result.get("end", False):
- self.partial_json = ""
- return result
- except:
- pass
-
- # Rest of the method remains the same for handling incomplete JSON
- new_s = ""
- stack = []
- is_inside_string = False
- escaped = False
-
- # Process each character in the string one at a time.
- for char in self.partial_json:
- if is_inside_string:
- if char == '"' and not escaped:
- is_inside_string = False
- elif char == "\n" and not escaped:
- char = (
- "\\n" # Replace the newline character with the escape sequence.
- )
- elif char == "\\":
- escaped = not escaped
- else:
- escaped = False
- else:
- if char == '"':
- is_inside_string = True
- escaped = False
- elif char == "{":
- stack.append("}")
- elif char == "[":
- stack.append("]")
- elif char == "}" or char == "]":
- if stack and stack[-1] == char:
- stack.pop()
- else:
- # Mismatched closing character; the input is malformed.
- return None
-
- # Append the processed character to the new string.
- new_s += char
-
- # If we're still inside a string at the end of processing, we need to close the string.
- if is_inside_string:
- new_s += '"'
-
- # Close any remaining open structures in the reverse order that they were opened.
- for closing_char in reversed(stack):
- new_s += closing_char
-
- # Attempt to parse the modified string as JSON.
- try:
- result = json.loads(new_s)
- self.current_json = result # Store the current state
- # Only clear buffer if we successfully parsed a complete message
- if result.get("end", False):
- self.partial_json = ""
- return result
- except:
- # Don't print the failure message since it's expected for incomplete JSON
- return None
-
- def feed(self, chunk):
- json_obj = self._parse_json(chunk)
- if not json_obj:
- return
-
- json_obj["name"] = self.name # Pass name into renderers
-
- # Process the JSON object
- schemas = []
- if self.name == "str_replace_editor":
- schemas = SchemaRenderer.edit_schemas.items()
- elif self.name == "bash":
- schemas = SchemaRenderer.bash_schemas.items()
- elif self.name == "computer":
- schemas = SchemaRenderer.computer_schemas.items()
-
- for schema_type, schema in schemas:
- if schema_type in json_obj:
- # If this is a new schema type, initialize it
- if schema_type not in self.current_renderers:
- # Close any existing renderers
- self.close()
- # Initialize the new renderer
- self.current_renderers[schema_type] = schema["renderer"](
- self.code_style
- )
-
- # Feed the entire JSON object to the renderer
- self.current_renderers[schema_type].feed(json_obj)
-
- def close(self):
- # Close any remaining content
- for renderer in self.current_renderers.values():
- if hasattr(renderer, "close"):
- renderer.close()
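The `_parse_json` helper above tolerates incomplete streaming JSON: it escapes raw newlines inside strings, closes a dangling string, and appends closers for any still-open objects or arrays before retrying `json.loads`. A self-contained sketch of that repair step, mirroring the archived logic (the function name is illustrative, not part of the module):

```python
import json


def complete_partial_json(partial: str):
    """Best-effort parse of an incomplete JSON fragment (mirrors the archived logic)."""
    repaired = ""
    stack = []          # expected closing characters, innermost last
    in_string = False
    escaped = False

    for ch in partial:
        if in_string:
            if ch == '"' and not escaped:
                in_string = False
            elif ch == "\n" and not escaped:
                ch = "\\n"  # keep raw newlines valid inside a JSON string
            elif ch == "\\":
                escaped = not escaped
            else:
                escaped = False
        else:
            if ch == '"':
                in_string = True
                escaped = False
            elif ch == "{":
                stack.append("}")
            elif ch == "[":
                stack.append("]")
            elif ch in "}]":
                if stack and stack[-1] == ch:
                    stack.pop()
                else:
                    return None  # mismatched closer: input is malformed
        repaired += ch

    if in_string:
        repaired += '"'                   # close a dangling string
    repaired += "".join(reversed(stack))  # close any open objects/arrays

    try:
        return json.loads(repaired)
    except json.JSONDecodeError:
        return None


# Example: complete_partial_json('{"command": "echo hel') -> {'command': 'echo hel'}
```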
diff --git a/archive/unused_markdown copy.py b/archive/unused_markdown copy.py
deleted file mode 100644
index 63a51f57df..0000000000
--- a/archive/unused_markdown copy.py
+++ /dev/null
@@ -1,389 +0,0 @@
-import os
-import sys
-from enum import Enum, auto
-from typing import Dict, Optional, Set
-
-from pygments import highlight
-from pygments.formatters import Terminal256Formatter
-from pygments.lexers import TextLexer, get_lexer_by_name
-from yaspin import yaspin
-from yaspin.spinners import Spinners
-
-
-class MarkdownElement(Enum):
- BOLD = "**"
- ITALIC = "*"
- CODE = "`"
- CODE_BLOCK = "```"
- LINK = "["
- HEADER = "#"
-
-
-class MarkdownStreamer:
- def __init__(self):
- # ANSI escape codes
- self.BOLD = "\033[1m"
- self.CODE = "\033[7m" # Regular inline code stays inverted
- self.CODE_BLOCK = "\033[48;5;236m" # Gray background for code blocks
- self.CODE_BLOCK_LINE = (
- "" # Removed the separator line since we'll use background
- )
- self.LINK = "\033[4;34m"
- self.RESET = "\033[0m"
- self.OSC = "\033]8;;"
- self.ST = "\033\\"
-
- # State tracking
- self.buffer = ""
- self.current_element: Optional[MarkdownElement] = None
- self.line_start = True
- self.header_level = 0
- self.backtick_count = 0
- self.code_lang = ""
- self.collecting_lang = False
-
- # Add new state variables for code block handling
- self.in_code_block = False
- self.current_code_line = ""
- self.line_number = 1
-
- # Add spinner (no text, just the spinner)
- self.spinner = yaspin(Spinners.simpleDots, text="")
-
- def write_styled(self, text: str, element: Optional[MarkdownElement] = None):
- """Write text with appropriate styling."""
- if element == MarkdownElement.BOLD:
- sys.stdout.write(f"{self.BOLD}{text}{self.RESET}")
- elif element == MarkdownElement.CODE:
- sys.stdout.write(f"{self.CODE}{text}{self.RESET}")
- elif element == MarkdownElement.CODE_BLOCK:
- # Handle single line of code block
- try:
- lexer = get_lexer_by_name(self.code_lang.strip().lower())
- except:
- lexer = TextLexer()
- formatter = Terminal256Formatter(style="monokai")
- formatted = highlight(text + "\n", lexer, formatter)
- sys.stdout.write(formatted)
- sys.stdout.flush()
- elif element == MarkdownElement.LINK:
- # Extract URL from buffer
- url_start = self.buffer.index("](") + 2
- url = self.buffer[url_start:-1]
- sys.stdout.write(
- f"{self.OSC}{url}{self.ST}{self.LINK}{text}{self.RESET}{self.OSC}{self.ST}"
- )
- elif element == MarkdownElement.HEADER:
- sys.stdout.write(f"{self.BOLD}{text}{self.RESET}")
- else:
- sys.stdout.write(text)
- sys.stdout.flush()
-
- def is_element_complete(self) -> bool:
- """Check if current markdown element is complete."""
- if not self.current_element:
- return False
-
- if self.current_element == MarkdownElement.LINK:
- return ")" in self.buffer and "](" in self.buffer
- elif self.current_element == MarkdownElement.CODE_BLOCK:
- # Look for matching triple backticks
- if self.buffer.startswith("```"):
- # Find the next triple backticks after the start
- rest_of_buffer = self.buffer[3:]
- return "```" in rest_of_buffer
- elif self.current_element == MarkdownElement.CODE:
- # For inline code, look for single backtick
- if self.buffer.startswith("`"):
- # Make sure we don't match with part of a triple backtick
- if not self.buffer.startswith("```"):
- return "`" in self.buffer[1:]
- elif self.current_element == MarkdownElement.BOLD:
- if len(self.buffer) >= 2 and self.buffer.startswith("*"):
- if self.buffer[1] == "*": # It's a bold marker
- return len(self.buffer) >= 4 and self.buffer.endswith("**")
- else: # It's just a single asterisk
- self.write_styled(self.buffer)
- return True
- elif self.current_element == MarkdownElement.HEADER:
- return "\n" in self.buffer
- return False
-
- def handle_complete_element(self):
- """Process and write a complete markdown element."""
- if not self.current_element:
- return
-
- if self.current_element == MarkdownElement.LINK:
- # Extract link text
- text = self.buffer[1 : self.buffer.index("]")]
- self.write_styled(text, MarkdownElement.LINK)
- elif self.current_element == MarkdownElement.CODE_BLOCK:
- content = self.buffer[3:] # Skip opening ```
- end_index = content.index("```")
-
- first_newline = content.find("\n")
- if first_newline != -1 and first_newline < end_index:
- self.code_lang = content[:first_newline]
- text = content[first_newline + 1 : end_index]
- else:
- self.code_lang = ""
- text = content[:end_index]
-
- self.write_styled(text, MarkdownElement.CODE_BLOCK)
- self.code_lang = "" # Reset language
- elif self.current_element == MarkdownElement.CODE:
- # Remove single backticks
- text = self.buffer[1:-1]
- self.write_styled(text, MarkdownElement.CODE)
- elif self.current_element == MarkdownElement.BOLD:
- # Remove ** markers
- text = self.buffer[2:-2]
- self.write_styled(text, MarkdownElement.BOLD)
- elif self.current_element == MarkdownElement.HEADER:
- # Remove # markers and newline
- text = self.buffer[self.header_level :].strip()
- self.write_styled(text, MarkdownElement.HEADER)
- self.write_styled("\n")
-
- self.current_element = None
- self.buffer = ""
- self.header_level = 0
-
- def feed(self, text: str):
- """Process incoming text stream."""
- for char in text:
- # Handle code block line-by-line streaming
- if self.in_code_block:
- if char == "\n":
- if self.collecting_lang:
- self.spinner.start()
- self.spinner.stop() # Stop before any output
- # First newline after ``` - this line contains the language
- self.code_lang = self.current_code_line
- self.collecting_lang = False
- terminal_width = os.get_terminal_size().columns
- sys.stdout.write(
- "\033[38;5;240m\n────┬" + "─" * (terminal_width - 5) + "\n"
- ) # Top line
- sys.stdout.write(
- "\033[38;5;240m │ " + self.code_lang + "\n"
- ) # Language line
- sys.stdout.write(
- "\033[38;5;240m────┼"
- + "─" * (terminal_width - 5)
- + "\033[0m\n"
- ) # Connected line
- self.line_number = 1
- self.current_code_line = ""
- else:
- self.spinner.stop() # Stop before any output
- try:
- lexer = get_lexer_by_name(self.code_lang.strip().lower())
- except:
- lexer = TextLexer()
- formatter = Terminal256Formatter(style="monokai")
-
- terminal_width = os.get_terminal_size().columns
- line_prefix = (
- f"\033[38;5;240m{str(self.line_number).rjust(3)} │ "
- )
- content_width = (
- terminal_width - len(line_prefix) + len("\033[38;5;240m")
- ) # Adjust for ANSI code
-
- if (
- not self.current_code_line.strip()
- ): # Empty or whitespace-only line
- sys.stdout.write(f"{line_prefix}\n")
- else:
- # Split the original line into words before highlighting
- words = self.current_code_line.split(" ")
- current_line = ""
- first_line = True
-
- for word in words:
- test_line = (
- current_line + (" " if current_line else "") + word
- )
- if len(test_line) > content_width:
- # Highlight and write current line
- if first_line:
- formatted = highlight(
- current_line, lexer, formatter
- ).rstrip()
- sys.stdout.write(f"{line_prefix}{formatted}\n")
- first_line = False
- else:
- formatted = highlight(
- current_line, lexer, formatter
- ).rstrip()
- sys.stdout.write(
- f"\033[38;5;240m │ {formatted}\n"
- )
- current_line = word
- else:
- current_line = test_line if current_line else word
-
- # Write any remaining content
- if current_line:
- formatted = highlight(
- current_line, lexer, formatter
- ).rstrip()
- if first_line:
- sys.stdout.write(f"{line_prefix}{formatted}\n")
- else:
- sys.stdout.write(
- f"\033[38;5;240m │ {formatted}\n"
- )
-
- self.line_number += 1
- self.current_code_line = ""
- self.spinner.start() # Start after output
- elif char == "`" and self.current_code_line.endswith("``"):
- self.spinner.stop() # Stop before final output
- if self.current_code_line[:-2]:
- try:
- lexer = get_lexer_by_name(self.code_lang.strip().lower())
- except:
- lexer = TextLexer()
- formatter = Terminal256Formatter(style="monokai")
- formatted = highlight(
- self.current_code_line[:-2], lexer, formatter
- ).rstrip()
- sys.stdout.write(
- f"{str(self.line_number).rjust(4)} │ {formatted}\n"
- )
- terminal_width = os.get_terminal_size().columns
- sys.stdout.write(
- "\033[38;5;240m────┴" + "─" * (terminal_width - 5) + "\033[0m\n"
- )
- sys.stdout.flush()
- self.in_code_block = False
- self.collecting_lang = False
- self.current_code_line = ""
- self.current_element = None
- self.buffer = ""
- else:
- self.current_code_line += char
- continue
-
- # If we're currently processing a markdown element
- if self.current_element:
- self.buffer += char
- if self.is_element_complete():
- self.handle_complete_element()
- continue
-
- # Special handling for backticks
- if char == "`":
- self.backtick_count += 1
-
- if self.backtick_count == 3:
- self.current_element = MarkdownElement.CODE_BLOCK
- self.buffer = "```"
- self.backtick_count = 0
- self.in_code_block = True
- self.collecting_lang = True
- self.line_number = 1
- continue
-
- # If we were counting backticks but got a different character
- if self.backtick_count > 0:
- if self.backtick_count == 1:
- self.current_element = MarkdownElement.CODE
- self.buffer = (
- "`" + char
- ) # Include both the backtick and current char
- else:
- # Write out accumulated backticks as regular text
- self.write_styled("`" * self.backtick_count)
- self.write_styled(char)
- self.backtick_count = 0
- continue
-
- # Check for start of new markdown elements
- if self.line_start and char == "#":
- self.current_element = MarkdownElement.HEADER
- self.header_level += 1
- self.buffer = char
- elif char == "[":
- self.current_element = MarkdownElement.LINK
- self.buffer = char
- elif char == "*":
- self.buffer = char
- self.current_element = MarkdownElement.BOLD
- else:
- # Regular text
- self.write_styled(char)
-
- # Track line starts for headers
- self.line_start = char == "\n"
-
- def reset(self):
- """Reset all state variables to their initial values."""
- self.buffer = ""
- self.current_element = None
- self.line_start = True
- self.header_level = 0
- self.backtick_count = 0
- self.code_lang = ""
- self.collecting_lang = False
- self.in_code_block = False
- self.current_code_line = ""
-
-
-import requests
-
-# Download a large markdown file to test different styles
-url = "https://raw.githubusercontent.com/matiassingers/awesome-readme/master/readme.md"
-url = (
- "https://raw.githubusercontent.com/OpenInterpreter/open-interpreter/main/README.md"
-)
-
-response = requests.get(url)
-markdown_text = response.text
-
-# Get everything after
-markdown_text = markdown_text.split("After install")[1]
-
-markdown_text = (
- """```python
-print("Hello, world!")
-```\n"""
- + markdown_text
-)
-
-
-# Initialize it once
-md = MarkdownStreamer()
-
-# Then feed it characters one at a time. You can do this:
-md.feed("H")
-md.feed("e")
-md.feed("l")
-md.feed("l")
-md.feed("o")
-
-# Or feed from a string:
-import random
-
-i = 0
-import time
-
-while i < len(markdown_text):
- # Random chunk size between 1 and 20
- chunk_size = random.randint(1, 20)
- time.sleep(random.uniform(0.01, 0.3))
- # Get chunk, ensuring we don't go past the end
- chunk = markdown_text[i : min(i + chunk_size, len(markdown_text))]
- # Feed each character in the chunk
- for char in chunk:
- md.feed(char)
- i += chunk_size
-
-# for chunk in markdown_text:
-# md.feed(chunk)
-
-# You can reset it if needed (clears all state)
-md.reset()
diff --git a/archive/unused_markdown.py b/archive/unused_markdown.py
deleted file mode 100644
index c096c07940..0000000000
--- a/archive/unused_markdown.py
+++ /dev/null
@@ -1,348 +0,0 @@
-import os
-import sys
-from enum import Enum, auto
-from typing import Dict, Optional, Set
-
-from pygments import highlight
-from pygments.formatters import Terminal256Formatter
-from pygments.lexers import TextLexer, get_lexer_by_name
-
-
-class MarkdownElement(Enum):
- BOLD = "**"
- ITALIC = "*"
- CODE = "`"
- CODE_BLOCK = "```"
- LINK = "["
- HEADER = "#"
-
-
-class MarkdownStreamer:
- def __init__(self):
- # ANSI escape codes
- self.BOLD = "\033[1m"
- self.CODE = "\033[7m" # Regular inline code stays inverted
- self.CODE_BLOCK = (
- "\033[48;5;234m" # Very subtle dark gray background for code blocks
- )
- self.CODE_BLOCK_LINE = (
- "" # Removed the separator line since we'll use background
- )
- self.LINK = "\033[4;34m"
- self.RESET = "\033[0m"
- self.OSC = "\033]8;;"
- self.ST = "\033\\"
-
- # State tracking
- self.buffer = ""
- self.current_element: Optional[MarkdownElement] = None
- self.line_start = True
- self.header_level = 0
- self.backtick_count = 0
- self.code_lang = ""
- self.collecting_lang = False
-
- # Add new state variables for code block handling
- self.in_code_block = False
- self.current_code_line = ""
- self.line_number = 1
-
- def write_styled(self, text: str, element: Optional[MarkdownElement] = None):
- """Write text with appropriate styling."""
- if element == MarkdownElement.BOLD:
- sys.stdout.write(f"{self.BOLD}{text}{self.RESET}")
- elif element == MarkdownElement.CODE:
- sys.stdout.write(f"{self.CODE}{text}{self.RESET}")
- elif element == MarkdownElement.CODE_BLOCK:
- # Handle single line of code block
- try:
- lexer = get_lexer_by_name(self.code_lang.strip().lower())
- except:
- lexer = TextLexer()
- formatter = Terminal256Formatter(style="monokai")
- formatted = highlight(text + "\n", lexer, formatter)
- sys.stdout.write(formatted)
- sys.stdout.flush()
- elif element == MarkdownElement.LINK:
- # Extract URL from buffer
- url_start = self.buffer.index("](") + 2
- url = self.buffer[url_start:-1]
- sys.stdout.write(
- f"{self.OSC}{url}{self.ST}{self.LINK}{text}{self.RESET}{self.OSC}{self.ST}"
- )
- elif element == MarkdownElement.HEADER:
- sys.stdout.write(f"{self.BOLD}{text}{self.RESET}")
- else:
- sys.stdout.write(text)
- sys.stdout.flush()
-
- def is_element_complete(self) -> bool:
- """Check if current markdown element is complete."""
- if not self.current_element:
- return False
-
- if self.current_element == MarkdownElement.LINK:
- return ")" in self.buffer and "](" in self.buffer
- elif self.current_element == MarkdownElement.CODE_BLOCK:
- # Look for matching triple backticks
- if self.buffer.startswith("```"):
- # Find the next triple backticks after the start
- rest_of_buffer = self.buffer[3:]
- return "```" in rest_of_buffer
- elif self.current_element == MarkdownElement.CODE:
- # For inline code, look for single backtick
- if self.buffer.startswith("`"):
- # Make sure we don't match with part of a triple backtick
- if not self.buffer.startswith("```"):
- return "`" in self.buffer[1:]
- elif self.current_element == MarkdownElement.BOLD:
- if len(self.buffer) >= 2 and self.buffer.startswith("*"):
- if self.buffer[1] == "*": # It's a bold marker
- return len(self.buffer) >= 4 and self.buffer.endswith("**")
- else: # It's just a single asterisk
- self.write_styled(self.buffer)
- return True
- elif self.current_element == MarkdownElement.HEADER:
- return "\n" in self.buffer
- return False
-
- def handle_complete_element(self):
- """Process and write a complete markdown element."""
- if not self.current_element:
- return
-
- if self.current_element == MarkdownElement.LINK:
- # Extract link text
- text = self.buffer[1 : self.buffer.index("]")]
- self.write_styled(text, MarkdownElement.LINK)
- elif self.current_element == MarkdownElement.CODE_BLOCK:
- content = self.buffer[3:] # Skip opening ```
- end_index = content.index("```")
-
- first_newline = content.find("\n")
- if first_newline != -1 and first_newline < end_index:
- self.code_lang = content[:first_newline]
- text = content[first_newline + 1 : end_index]
- else:
- self.code_lang = ""
- text = content[:end_index]
-
- self.write_styled(text, MarkdownElement.CODE_BLOCK)
- self.code_lang = "" # Reset language
- elif self.current_element == MarkdownElement.CODE:
- # Remove single backticks
- text = self.buffer[1:-1]
- self.write_styled(text, MarkdownElement.CODE)
- elif self.current_element == MarkdownElement.BOLD:
- # Remove ** markers
- text = self.buffer[2:-2]
- self.write_styled(text, MarkdownElement.BOLD)
- elif self.current_element == MarkdownElement.HEADER:
- # Remove # markers and newline
- text = self.buffer[self.header_level :].strip()
- self.write_styled(text, MarkdownElement.HEADER)
- self.write_styled("\n")
-
- self.current_element = None
- self.buffer = ""
- self.header_level = 0
-
- def feed(self, text: str):
- """Process incoming text stream."""
- for char in text:
- # Handle code block line-by-line streaming
- if self.in_code_block:
- if char == "\n":
- if self.collecting_lang:
- # First newline after ``` - this line contains the language
- self.code_lang = self.current_code_line
- self.collecting_lang = False
- terminal_width = os.get_terminal_size().columns
- # Print empty line with background
- sys.stdout.write(
- f"\n\n{self.CODE_BLOCK}"
- + " " * terminal_width
- + f"{self.RESET}\n"
- )
- self.current_code_line = ""
- else:
- try:
- lexer = get_lexer_by_name(self.code_lang.strip().lower())
- except:
- lexer = TextLexer()
- formatter = Terminal256Formatter(style="monokai")
-
- terminal_width = os.get_terminal_size().columns
- padding = 2 # Left/right padding
- content_width = terminal_width - (padding * 2)
-
- # Split the original line into words before highlighting
- words = self.current_code_line.split(" ")
- current_line = ""
-
- for word in words:
- test_line = (
- current_line + (" " if current_line else "") + word
- )
- if len(test_line) > content_width:
- # Print current line with background and padding
- formatted = highlight(
- current_line, lexer, formatter
- ).rstrip()
- sys.stdout.write(
- f"{self.CODE_BLOCK} {formatted}"
- + " " * (terminal_width - len(current_line) - 2)
- + f"{self.RESET}\n"
- )
- current_line = word
- else:
- current_line = test_line if current_line else word
-
- # Write any remaining content
- if current_line:
- formatted = highlight(
- current_line, lexer, formatter
- ).rstrip()
- sys.stdout.write(
- f"{self.CODE_BLOCK} {formatted}"
- + " " * (terminal_width - len(current_line) - 2)
- + f"{self.RESET}\n"
- )
-
- self.current_code_line = ""
- elif char == "`" and self.current_code_line.endswith("``"):
- if self.current_code_line[:-2]:
- try:
- lexer = get_lexer_by_name(self.code_lang.strip().lower())
- except:
- lexer = TextLexer()
- formatter = Terminal256Formatter(style="monokai")
- formatted = highlight(
- self.current_code_line[:-2], lexer, formatter
- ).rstrip()
- terminal_width = os.get_terminal_size().columns
- sys.stdout.write(
- f"{self.CODE_BLOCK} {formatted}"
- + " "
- * (terminal_width - len(self.current_code_line[:-2]) - 2)
- + f"{self.RESET}\n"
- )
-
- terminal_width = os.get_terminal_size().columns
- # Print empty line with background
- sys.stdout.write(
- f"{self.CODE_BLOCK}" + " " * terminal_width + f"{self.RESET}\n"
- )
- sys.stdout.flush()
- self.in_code_block = False
- self.collecting_lang = False
- self.current_code_line = ""
- self.current_element = None
- self.buffer = ""
- else:
- self.current_code_line += char
- continue
-
- # If we're currently processing a markdown element
- if self.current_element:
- self.buffer += char
- if self.is_element_complete():
- self.handle_complete_element()
- continue
-
- # Special handling for backticks
- if char == "`":
- self.backtick_count += 1
-
- if self.backtick_count == 3:
- self.current_element = MarkdownElement.CODE_BLOCK
- self.buffer = "```"
- self.backtick_count = 0
- self.in_code_block = True
- self.collecting_lang = True
- self.line_number = 1
- continue
-
- # If we were counting backticks but got a different character
- if self.backtick_count > 0:
- if self.backtick_count == 1:
- self.current_element = MarkdownElement.CODE
- self.buffer = (
- "`" + char
- ) # Include both the backtick and current char
- else:
- # Write out accumulated backticks as regular text
- self.write_styled("`" * self.backtick_count)
- self.write_styled(char)
- self.backtick_count = 0
- continue
-
- # Check for start of new markdown elements
- if self.line_start and char == "#":
- self.current_element = MarkdownElement.HEADER
- self.header_level += 1
- self.buffer = char
- elif char == "[":
- self.current_element = MarkdownElement.LINK
- self.buffer = char
- elif char == "*":
- self.buffer = char
- self.current_element = MarkdownElement.BOLD
- else:
- # Regular text
- self.write_styled(char)
-
- # Track line starts for headers
- self.line_start = char == "\n"
-
- def reset(self):
- """Reset all state variables to their initial values."""
- self.buffer = ""
- self.current_element = None
- self.line_start = True
- self.header_level = 0
- self.backtick_count = 0
- self.code_lang = ""
- self.collecting_lang = False
- self.in_code_block = False
- self.current_code_line = ""
-
-
-import requests
-
-# Download a large markdown file to test different styles
-url = "https://raw.githubusercontent.com/matiassingers/awesome-readme/master/readme.md"
-url = (
- "https://raw.githubusercontent.com/OpenInterpreter/open-interpreter/main/README.md"
-)
-
-response = requests.get(url)
-markdown_text = response.text
-
-markdown_text = markdown_text.split("After install")[1]
-
-# Initialize it once
-md = MarkdownStreamer()
-
-# Or feed from a string:
-import random
-
-i = 0
-import time
-
-while i < len(markdown_text):
- # Random chunk size between 1 and 20
- chunk_size = random.randint(1, 20)
- time.sleep(random.uniform(0.01, 0.3))
- # Get chunk, ensuring we don't go past the end
- chunk = markdown_text[i : min(i + chunk_size, len(markdown_text))]
- # Feed each character in the chunk
- for char in chunk:
- md.feed(char)
- i += chunk_size
-
-# for chunk in markdown_text:
-# md.feed(chunk)
-
-# You can reset it if needed (clears all state)
-md.reset()
diff --git a/archive/wtf copy.py b/archive/wtf copy.py
deleted file mode 100644
index 15ffc954f9..0000000000
--- a/archive/wtf copy.py
+++ /dev/null
@@ -1,474 +0,0 @@
-from yaspin import yaspin
-
-# Start spinner
-spinner = yaspin()
-spinner.start()
-
-# This should actually run ix, but convert the tool it uses into a bash script (e.g. sed if it uses the str tool). Just tell it that it's only got one shot. Parallel is fine I guess.
-
-import os
-import platform
-import re
-import subprocess
-import sys
-import time
-
-import platformdirs
-import pyperclip
-import yaml
-
-try:
- from pynput.keyboard import Controller, Key
-except ImportError:
- spinner.stop()
- print("Please run `pip install pynput` to use the `wtf` command.")
- exit()
-
-# Don't let litellm go online here, this slows it down
-os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"
-import litellm
-
-# Define system messages
-SYSTEM_MESSAGE = f"""
-You are a fast, efficient terminal assistant. Your task is to:
-
-1. Scan the provided terminal history.
-2. Identify the most recent error or issue.
-3. Take a deep breath, and thoughtfully, carefully determine the most likely solution or debugging step.
-4. Respond with a VERY brief explanation followed by a markdown code block containing a shell command to address the issue.
-
-Rules:
-- Provide a single shell command in your code block, using line continuation characters (\\ for Unix-like systems, ^ for Windows) for multiline commands.
-- Ensure the entire command is on one logical line, requiring the user to press enter only once to execute.
-- If multiple steps are needed, explain the process briefly, then provide only the first command or a combined command using && or ;.
-- Keep any explanatory text extremely brief and concise.
-- Place explanatory text before the code block.
-- NEVER USE COMMENTS IN YOUR CODE.
-- Construct the command with proper escaping: e.g. use sed with correctly escaped quotes to ensure the shell interprets the command correctly. This involves:
- • Using double quotes around the sed expression to handle single quotes within the command.
- • Combining single and double quotes to properly escape characters within the shell command.
-- If previous commands attempted to fix the issue and failed, learn from them by proposing a DIFFERENT command.
-- Focus on the most recent error, ignoring earlier unrelated commands. If the user included a message at the end, focus on helping them.
-- If you need more information to confidently fix the problem, ask the user to run wtf again in a moment, then write a command like grep to learn more about the problem.
-- The error may be as simple as a spelling error, or as complex as requiring tests to be run, or code to be find-and-replaced.
-- Prioritize speed and conciseness in your response. Don't use markdown headings. Don't say more than a sentence or two. Be incredibly concise.
-
-User's System: {platform.system()}
-CWD: {os.getcwd()}
-{"Shell: " + os.environ.get('SHELL') if os.environ.get('SHELL') else ''}
-
-"""
-
-CUSTOM_MESSAGE_SYSTEM_MESSAGE = f"""
-
-You are a fast, efficient AI assistant for terminal and coding tasks. When summoned, you will:
-
-1. Review the provided terminal history (which may or may not be relevant) and final user query.
-2. Determine the most appropriate solution or debugging step to resolve the user's final query.
-3. Respond with a brief explanation and a single shell command in a markdown code block.
-
-Rules:
-- Provide one logical command (use \ or ^ for multiline).
-- Keep explanations concise and place them before the code block.
-- Use proper command escaping (e.g., sed with correct quotes).
-- Avoid comments in the code block.
-- If more info is needed, provide a command to gather it (e.g., grep).
-- Focus on the user's FINAL query and ADDRESS NOTHING ELSE, using terminal history for context if relevant.
-- For multi-step solutions, explain briefly and provide the first or combined command.
-- Prioritize addressing the user's specific request (at the END, after "wtf") efficiently.
-
-User's System: {platform.system()}
-CWD: {os.getcwd()}
-{"Shell: " + os.environ.get('SHELL') if os.environ.get('SHELL') else ''}
-
-"""
-
-LOCAL_SYSTEM_MESSAGE = f"""
-You're a fast AI assistant for terminal issues. You must:
-
-1. Scan terminal history
-2. Identify latest error
-3. Determine best solution
-4. Reply with brief explanation + single shell command in markdown
-
-Rules:
-- One logical command (use \ or ^ for multiline)
-- Explain briefly, then provide command
-- No comments in code
-- Proper escaping (e.g., sed with correct quotes)
-- If unsure, get more info with a command like grep
-- Prioritize speed and conciseness
-
-Example response:
-
-We need to fix the file permissions on config.yml.
-```bash
-chmod 644 config.yml
-```
-
-User's System: {platform.system()}
-CWD: {os.getcwd()}
-{"Shell: " + os.environ.get('SHELL') if os.environ.get('SHELL') else ''}
-
-Now, it's your turn:
-"""
-
-
-def main():
- ### GET OPTIONAL CUSTOM MESSAGE
-
- custom_message = None
- if len(sys.argv) > 1:
- custom_message = "wtf " + " ".join(sys.argv[1:])
-
- ### GET TERMINAL HISTORY
-
- keyboard = Controller()
- history = None
-
- ## SELECT ALL AND COPY METHOD
-
- if True:
- # Save clipboard
- clipboard = pyperclip.paste()
-
- # Select all text
- shortcut_key = Key.cmd if platform.system() == "Darwin" else Key.ctrl
- with keyboard.pressed(shortcut_key):
- keyboard.press("a")
- keyboard.release("a")
-
- # Copy selected text
- with keyboard.pressed(shortcut_key):
- keyboard.press("c")
- keyboard.release("c")
-
- # Deselect
- keyboard.press(Key.backspace)
- keyboard.release(Key.backspace)
-
- # Wait for the clipboard to update
- time.sleep(0.1)
-
- # Get terminal history from clipboard
- history = pyperclip.paste()
-
- # Reset clipboard to stored one
- pyperclip.copy(clipboard)
-
- ## OCR SCREENSHOT METHOD
-
- if not history:
- try:
- import pytesseract
- from PIL import ImageGrab
-
- # Get active window coordinates using platform-specific methods
- platform_name = platform.system()
- if platform_name == "Windows":
- import win32gui
-
- window = win32gui.GetForegroundWindow()
- left, top, right, bottom = win32gui.GetWindowRect(window)
- elif platform_name == "Darwin":
- from Quartz import (
- CGWindowListCopyWindowInfo,
- kCGNullWindowID,
- kCGWindowListOptionOnScreenOnly,
- )
-
- window_info = CGWindowListCopyWindowInfo(
- kCGWindowListOptionOnScreenOnly, kCGNullWindowID
- )
- for window in window_info:
- if window["kCGWindowLayer"] == 0:
- window_geometry = window["kCGWindowBounds"]
- left = window_geometry["X"]
- top = window_geometry["Y"]
- right = int(left + window_geometry["Width"])
- bottom = int(top + window_geometry["Height"])
- break
- else: # Assume it's a Linux-based system
- root = subprocess.Popen(
- ["xprop", "-root", "_NET_ACTIVE_WINDOW"], stdout=subprocess.PIPE
- )
- stdout, stderr = root.communicate()
- m = re.search(b"^_NET_ACTIVE_WINDOW.* ([\\w]+)$", stdout)
- if m is not None:
- window_id = m.group(1)
- window = subprocess.Popen(
- ["xwininfo", "-id", window_id], stdout=subprocess.PIPE
- )
- stdout, stderr = window.communicate()
- match = re.search(
- rb"Absolute upper-left X:\s*(\d+).*Absolute upper-left Y:\s*(\d+).*Width:\s*(\d+).*Height:\s*(\d+)",
- stdout,
- re.DOTALL,
- )
- if match is not None:
- left, top, width, height = map(int, match.groups())
- right = left + width
- bottom = top + height
-
- # spinner.stop()
- # print("\nPermission to capture terminal commands via screenshot -> OCR?")
- # permission = input("(y/n) > ")
- # print("")
- # if permission.lower() != 'y':
- # print("Exiting...")
- # exit()
- # spinner.start()
-
- # Take screenshot of the active window
- screenshot = ImageGrab.grab(
- bbox=(int(left), int(top), int(right), int(bottom))
- )
-
- # OCR the screenshot to get the text
- text = pytesseract.image_to_string(screenshot)
-
- history = text
-
- if "wtf" in history:
- last_wtf_index = history.rindex("wtf")
- history = history[:last_wtf_index]
- except ImportError:
- spinner.stop()
- print(
- "To use OCR to capture terminal output (recommended) run `pip install pytesseract` or `pip3 install pytesseract`."
- )
- spinner.start()
-
- ## TERMINAL HISTORY METHOD
-
- if not history:
- try:
- shell = os.environ.get("SHELL", "/bin/bash")
- command = [shell, "-ic", "fc -ln -10"] # Get just the last command
-
- output = subprocess.check_output(command, stderr=subprocess.STDOUT).decode(
- "utf-8"
- )
-
- # Split the output into lines
- lines = output.strip().split("\n")
-
- # Filter out lines that look like the "saving session" message
- history = [
- line
- for line in lines
- if not line.startswith("...")
- and "saving" not in line
- and "Saving session..." not in line
- ]
- history = [l.strip() for l in history if l.strip()][-10:]
-
- # Split the history into individual commands
-
- # Get the last command
- last_command = history[-1]
- spinner.start()
- print(
- f"\nRunning the last command again to collect its output: {last_command}\n"
- )
- spinner.stop()
- # Run the last command and collect its output
- try:
- last_command_output = subprocess.check_output(
- last_command, shell=True, stderr=subprocess.STDOUT
- ).decode("utf-8")
- except subprocess.CalledProcessError as e:
- last_command_output = e.output.decode("utf-8")
- except Exception as e:
- last_command_output = str(e)
-
- # Format the history
- history = "The user tried to run the following commands:\n" + "\n".join(
- history
- )
- history += f"\nThe last command, {last_command}, resulted in this output:\n{last_command_output}"
-
- except Exception as e:
- raise
- print(
- "Failed to retrieve and run the last command from terminal history. Exiting."
- )
- return
-
- # Trim history
- history = history[-9000:].strip()
-
- # Remove any trailing spinner commands
- spinner_commands = [
- "⠴",
- "⠦",
- "⠇",
- "⠉",
- "⠙",
- "⠸",
- "⠼",
- "⠤",
- "⠴",
- "⠂",
- "⠄",
- "⠈",
- "⠐",
- "⠠",
- ]
- for command in spinner_commands:
- if history.endswith(command):
- history = history[: -len(command)].strip()
- break
-
- if "wtf" in history:
- last_wtf_index = history.rindex("wtf")
- history = history[:last_wtf_index]
-
- ### GET ERROR CONTEXT
-
- # Regex pattern to extract filename and line number
- pattern = r'File "([^"]+)", line (\d+)'
- matches = re.findall(pattern, history)
-
- # Only keep the last X matches
- matches = matches[-1:] # Just the last match, change -1 to get more
-
- # Function to get specified lines from a file
- def get_lines_from_file(filename, line_number):
- lines = []
- try:
- with open(filename, "r") as file:
- all_lines = file.readlines()
- start_line = max(0, line_number - 3) # Preceding lines
- end_line = min(len(all_lines) - 1, line_number + 2) # Following lines, clamped to the last line
- for i in range(start_line, end_line + 1):
- lines.append(f"Line {i+1}: " + all_lines[i].rstrip())
- except Exception as e:
- lines.append(f"Error reading file: {e}")
- return lines
-
- # Create the dictionary with filename, line number, and text
- result = []
- for match in matches:
- filename, line_number = match
- line_number = int(line_number)
- lines = get_lines_from_file(filename, line_number)
- result.append({"filename": filename, "text": "\n".join(lines)})
-
- if result != []:
- history = "Terminal: " + history
-
- # Add context
- for entry in result:
- history = f"""File: {entry["filename"]}\n{entry["text"]}\n\n""" + history
-
- ### PREPARE FOR LLM
-
- # Get LLM model from profile
- default_profile_path = os.path.join(
- platformdirs.user_config_dir("open-interpreter"), "profiles", "default.yaml"
- )
-
- try:
- with open(default_profile_path, "r") as file:
- profile = yaml.safe_load(file)
- wtf_model = profile.get("wtf", {}).get("model")
- if wtf_model:
- model = wtf_model
- else:
- model = profile.get("llm", {}).get("model", "gpt-4o-mini")
- except:
- model = "gpt-4o-mini"
-
- # If they're using a local model (improve this heuristic) use the LOCAL_SYSTEM_MESSAGE
- if "ollama" in model or "llama" in model:
- system_message = LOCAL_SYSTEM_MESSAGE
- else:
- system_message = SYSTEM_MESSAGE
-
- # If they passed in a custom message, use the CUSTOM_MESSAGE_SYSTEM_MESSAGE
- if custom_message:
- system_message = CUSTOM_MESSAGE_SYSTEM_MESSAGE
- user_message = (
- "Answer THIS question by telling me what code to run:"
- + custom_message
- + "\nMy terminal history, which may or may not be relevant, is this:\n---\n"
- + history
- + "\n---\nPlease now answer THIS question by telling me what code to run: "
- + custom_message
- )
- else:
- user_message = (
- history.strip()
- + "\n\nPlease help me resolve the final error above by telling me what code to run in a markdown code block."
- )
-
- # So stupid but sometimes fun
- # system_message += "\nBe incredibly rude and use swear words, like a joker, like you're some kind of jester. You can say 'Fuck' and 'Fucking'. This is for adults. Be funny. USE ALL LOWER CASE. Act like you're an aggrieved pair programmer to the idiot that got themselves in this situation."
-
- messages = [
- {"role": "system", "content": system_message.strip()},
- {"role": "user", "content": user_message.strip()},
- ]
-
- # DEBUGGING: PRINT MESSAGES
-
- # print("---")
- # import pprint
- # pprint.pprint(messages)
- # print("---")
- # time.sleep(100)
-
- ### PARSE LLM RESPONSE
-
- in_code = False
- backtick_count = 0
- language_buffer = ""
- started = False
-
- for chunk in litellm.completion(
- model=model, messages=messages, temperature=0, stream=True
- ):
- if not started:
- started = True
- spinner.stop()
- print("")
-
- content = chunk.choices[0].delta.content
- if content:
- for char in content:
- if char == "`":
- backtick_count += 1
- if backtick_count == 3:
- in_code = not in_code
- backtick_count = 0
- language_buffer = ""
- if not in_code: # We've just exited a code block
- time.sleep(0.1)
- print("\n")
- return # Exit after typing the command
- else: # Entered code block
- print("Press `enter` to run: ", end="", flush=True)
- elif in_code:
- if language_buffer is not None:
- if char.isalnum():
- language_buffer += char
- elif char.isspace():
- language_buffer = None
- elif char not in ["\n", "\\"]:
- keyboard.type(char)
- else:
- if backtick_count:
- print("`" * backtick_count, end="", flush=True)
- backtick_count = 0
-
- # if "\n" in char:
- # char.replace("\n", "\n ")
-
- print(char, end="", flush=True)
-
- backtick_count = 0
-
-
-if __name__ == "__main__":
- main()
diff --git a/interpreter_1/README.md b/interpreter/README.md
similarity index 100%
rename from interpreter_1/README.md
rename to interpreter/README.md
diff --git a/interpreter/__init__.py b/interpreter/__init__.py
index 7b9b7bd559..1dd1375de0 100644
--- a/interpreter/__init__.py
+++ b/interpreter/__init__.py
@@ -1,69 +1,42 @@
-import sys
+"""
+Open Interpreter
+===============
-if "--os" in sys.argv:
- from rich import print as rich_print
- from rich.markdown import Markdown
- from rich.rule import Rule
+A natural language interface for your computer.
- def print_markdown(message):
- """
- Display markdown message. Works with multiline strings with lots of indentation.
- Will automatically make single line > tags beautiful.
- """
+Basic Usage
+----------
+>>> from interpreter import Interpreter
+>>> interpreter = Interpreter()
+>>> interpreter.chat("Hello, what can you help me with?")
- for line in message.split("\n"):
- line = line.strip()
- if line == "":
- print("")
- elif line == "---":
- rich_print(Rule(style="white"))
- else:
- try:
- rich_print(Markdown(line))
- except UnicodeEncodeError as e:
- # Replace the problematic character or handle the error as needed
- print("Error displaying line:", line)
+Configuration
+------------
+>>> from interpreter import Interpreter, Config
- if "\n" not in message and message.startswith(">"):
- # Aesthetic choice. For these tags, they need a space below them
- print("")
+# Use defaults
+interpreter = Interpreter()
- import pkg_resources
- import requests
- from packaging import version
+# Load from custom profile
+config = Config.from_file("~/custom_profile.json")
+interpreter = Interpreter(config)
- def check_for_update():
- # Fetch the latest version from the PyPI API
- response = requests.get(f"https://pypi.org/pypi/open-interpreter/json")
- latest_version = response.json()["info"]["version"]
+# Save current settings
+interpreter.save_config("~/my_settings.json")
+"""
- # Get the current version using pkg_resources
- current_version = pkg_resources.get_distribution("open-interpreter").version
+# Use lazy imports to avoid loading heavy modules immediately
+from importlib import import_module
- return version.parse(latest_version) > version.parse(current_version)
- if check_for_update():
- print_markdown(
- "> **A new version of Open Interpreter is available.**\n>Please run: `pip install --upgrade open-interpreter`\n\n---"
- )
+def __getattr__(name):
+ """Lazy load attributes only when they're actually requested"""
+ if name in ["Interpreter", "Profile"]:
+ if name == "Interpreter":
+ return getattr(import_module(".interpreter", __package__), name)
+ else:
+ return getattr(import_module(".profiles", __package__), name)
+ raise AttributeError(f"module '{__package__}' has no attribute '{name}'")
- if "--voice" in sys.argv:
- print("Coming soon...")
- from ..computer_use.loop import run_async_main
- run_async_main()
- exit()
-
-from .core.async_core import AsyncInterpreter
-from .core.computer.terminal.base_language import BaseLanguage
-from .core.core import OpenInterpreter
-
-interpreter = OpenInterpreter()
-computer = interpreter.computer
-
-# ____ ____ __ __
-# / __ \____ ___ ____ / _/___ / /____ _________ ________ / /____ _____
-# / / / / __ \/ _ \/ __ \ / // __ \/ __/ _ \/ ___/ __ \/ ___/ _ \/ __/ _ \/ ___/
-# / /_/ / /_/ / __/ / / / _/ // / / / /_/ __/ / / /_/ / / / __/ /_/ __/ /
-# \____/ .___/\___/_/ /_/ /___/_/ /_/\__/\___/_/ / .___/_/ \___/\__/\___/_/
-# /_/ /_/
+__all__ = ["Interpreter", "Profile"]
diff --git a/interpreter_1/cli.py b/interpreter/cli.py
similarity index 100%
rename from interpreter_1/cli.py
rename to interpreter/cli.py
diff --git a/interpreter_1/commands.py b/interpreter/commands.py
similarity index 100%
rename from interpreter_1/commands.py
rename to interpreter/commands.py
diff --git a/interpreter_1/interpreter.py b/interpreter/interpreter.py
similarity index 100%
rename from interpreter_1/interpreter.py
rename to interpreter/interpreter.py
diff --git a/interpreter/misc/__init__.py b/interpreter/misc/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/interpreter_1/misc/desktop.py b/interpreter/misc/desktop.py
similarity index 100%
rename from interpreter_1/misc/desktop.py
rename to interpreter/misc/desktop.py
diff --git a/interpreter_1/misc/get_input.py b/interpreter/misc/get_input.py
similarity index 100%
rename from interpreter_1/misc/get_input.py
rename to interpreter/misc/get_input.py
diff --git a/interpreter_1/misc/help.py b/interpreter/misc/help.py
similarity index 100%
rename from interpreter_1/misc/help.py
rename to interpreter/misc/help.py
diff --git a/interpreter_1/misc/spinner.py b/interpreter/misc/spinner.py
similarity index 100%
rename from interpreter_1/misc/spinner.py
rename to interpreter/misc/spinner.py
diff --git a/interpreter_1/misc/stream_text.py b/interpreter/misc/stream_text.py
similarity index 100%
rename from interpreter_1/misc/stream_text.py
rename to interpreter/misc/stream_text.py
diff --git a/interpreter_1/misc/welcome.py b/interpreter/misc/welcome.py
similarity index 100%
rename from interpreter_1/misc/welcome.py
rename to interpreter/misc/welcome.py
diff --git a/interpreter_1/profiles.py b/interpreter/profiles.py
similarity index 100%
rename from interpreter_1/profiles.py
rename to interpreter/profiles.py
diff --git a/interpreter_1/server.py b/interpreter/server.py
similarity index 100%
rename from interpreter_1/server.py
rename to interpreter/server.py
diff --git a/interpreter_1/tools/__init__.py b/interpreter/tools/__init__.py
similarity index 100%
rename from interpreter_1/tools/__init__.py
rename to interpreter/tools/__init__.py
diff --git a/interpreter_1/tools/base.py b/interpreter/tools/base.py
similarity index 100%
rename from interpreter_1/tools/base.py
rename to interpreter/tools/base.py
diff --git a/interpreter_1/tools/bash.py b/interpreter/tools/bash.py
similarity index 100%
rename from interpreter_1/tools/bash.py
rename to interpreter/tools/bash.py
diff --git a/interpreter_1/tools/collection.py b/interpreter/tools/collection.py
similarity index 100%
rename from interpreter_1/tools/collection.py
rename to interpreter/tools/collection.py
diff --git a/interpreter_1/tools/computer.py b/interpreter/tools/computer.py
similarity index 100%
rename from interpreter_1/tools/computer.py
rename to interpreter/tools/computer.py
diff --git a/interpreter_1/tools/edit.py b/interpreter/tools/edit.py
similarity index 100%
rename from interpreter_1/tools/edit.py
rename to interpreter/tools/edit.py
diff --git a/interpreter_1/tools/run.py b/interpreter/tools/run.py
similarity index 100%
rename from interpreter_1/tools/run.py
rename to interpreter/tools/run.py
diff --git a/interpreter_1/tools/simple_bash.py b/interpreter/tools/simple_bash.py
similarity index 100%
rename from interpreter_1/tools/simple_bash.py
rename to interpreter/tools/simple_bash.py
diff --git a/interpreter/ui/__init__.py b/interpreter/ui/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/interpreter_1/ui/markdown.py b/interpreter/ui/markdown.py
similarity index 100%
rename from interpreter_1/ui/markdown.py
rename to interpreter/ui/markdown.py
diff --git a/interpreter_1/ui/tool.py b/interpreter/ui/tool.py
similarity index 100%
rename from interpreter_1/ui/tool.py
rename to interpreter/ui/tool.py
diff --git a/interpreter_1/__init__.py b/interpreter_1/__init__.py
deleted file mode 100644
index 1dd1375de0..0000000000
--- a/interpreter_1/__init__.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""
-Open Interpreter
-===============
-
-A natural language interface for your computer.
-
-Basic Usage
-----------
->>> from interpreter import Interpreter
->>> interpreter = Interpreter()
->>> interpreter.chat("Hello, what can you help me with?")
-
-Configuration
-------------
->>> from interpreter import Interpreter, Config
-
-# Use defaults
-interpreter = Interpreter()
-
-# Load from custom profile
-config = Config.from_file("~/custom_profile.json")
-interpreter = Interpreter(config)
-
-# Save current settings
-interpreter.save_config("~/my_settings.json")
-"""
-
-# Use lazy imports to avoid loading heavy modules immediately
-from importlib import import_module
-
-
-def __getattr__(name):
- """Lazy load attributes only when they're actually requested"""
- if name in ["Interpreter", "Profile"]:
- if name == "Interpreter":
- return getattr(import_module(".interpreter", __package__), name)
- else:
- return getattr(import_module(".profiles", __package__), name)
- raise AttributeError(f"module '{__package__}' has no attribute '{name}'")
-
-
-__all__ = ["Interpreter", "Profile"]
diff --git a/interpreter_1/cli copy.py b/interpreter_1/cli copy.py
deleted file mode 100644
index b2c980c691..0000000000
--- a/interpreter_1/cli copy.py
+++ /dev/null
@@ -1,288 +0,0 @@
-import sys
-
-if len(sys.argv) > 1 and sys.argv[1] == "--help":
- from .misc.help import help_message
-
- help_message()
- sys.exit(0)
-
-if len(sys.argv) > 1 and sys.argv[1] == "--version":
- # Print version of currently installed interpreter
- # Get this from the package metadata
- from importlib.metadata import version
-
- print("Open Interpreter " + version("open-interpreter"))
- sys.exit(0)
-
-import argparse
-import asyncio
-import os
-from concurrent.futures import ThreadPoolExecutor
-from typing import Any, Dict
-
-from .misc.spinner import SimpleSpinner
-from .profiles import Profile
-
-
-def _parse_list_arg(value: str) -> list:
- """Parse a comma-separated or JSON-formatted string into a list"""
- if not value:
- return []
-
- # Try parsing as JSON first
- if value.startswith("["):
- try:
- import json
-
- return json.loads(value)
- except json.JSONDecodeError:
- pass
-
- # Fall back to comma-separated parsing
- return [item.strip() for item in value.split(",") if item.strip()]
-
-
-def _profile_to_arg_params(profile: Profile) -> Dict[str, Dict[str, Any]]:
- """Convert Profile attributes to argparse parameter definitions"""
- return {
- # Server configuration
- "server": {
- "flags": ["--serve", "-s"],
- "action": "store_true",
- "default": profile.serve,
- "help": "Start the server",
- },
- # Model and API configuration
- "model": {
- "flags": ["--model", "-m"],
- "default": profile.model,
- "help": "Specify the model name",
- },
- "provider": {
- "flags": ["--provider"],
- "default": profile.provider,
- "help": "Specify the API provider",
- },
- "api_base": {
- "flags": ["--api-base", "-b"],
- "default": profile.api_base,
- "help": "Specify the API base URL",
- },
- "api_key": {
- "flags": ["--api-key", "-k"],
- "default": profile.api_key,
- "help": "Specify the API key",
- },
- "api_version": {
- "flags": ["--api-version"],
- "default": profile.api_version,
- "help": "Specify the API version",
- },
- "temperature": {
- "flags": ["--temperature"],
- "default": profile.temperature,
- "help": "Specify the temperature",
- },
- "max_tokens": {
- "flags": ["--max-tokens"],
- "default": profile.max_tokens,
- "help": "Specify the maximum number of tokens",
- },
- # Tool configuration
- "tools": {
- "flags": ["--tools"],
- "default": profile.tools,
- "help": "Specify enabled tools (comma-separated or JSON list)",
- "type": _parse_list_arg,
- },
- "allowed_commands": {
- "flags": ["--allowed-commands"],
- "default": profile.allowed_commands,
- "help": "Specify allowed commands (comma-separated or JSON list)",
- "type": _parse_list_arg,
- },
- "allowed_paths": {
- "flags": ["--allowed-paths"],
- "default": profile.allowed_paths,
- "help": "Specify allowed paths (comma-separated or JSON list)",
- "type": _parse_list_arg,
- },
- "auto_run": {
- "flags": ["--auto-run", "-y"],
- "action": "store_true",
- "default": profile.auto_run,
- "help": "Automatically run tools",
- },
- "tool_calling": {
- "flags": ["--no-tool-calling"],
- "action": "store_false",
- "default": profile.tool_calling,
- "dest": "tool_calling",
- "help": "Disable tool calling (enabled by default)",
- },
- "interactive": {
- "flags": ["--interactive"],
- "action": "store_true",
- "default": profile.interactive,
- "help": "Enable interactive mode (enabled by default)",
- },
- "no_interactive": {
- "flags": ["--no-interactive"],
- "action": "store_false",
- "default": profile.interactive,
- "dest": "interactive",
- "help": "Disable interactive mode",
- },
- # Behavior configuration
- "system_message": {
- "flags": ["--system-message"],
- "default": profile.system_message,
- "help": "Overwrite system message",
- },
- "custom_instructions": {
- "flags": ["--instructions"],
- "default": profile.instructions,
- "help": "Appended to default system message",
- },
- "max_turns": {
- "flags": ["--max-turns"],
- "type": int,
- "default": profile.max_turns,
- "help": "Set maximum conversation turns, defaults to -1 (unlimited)",
- },
- "profile": {
- "flags": ["--profile"],
- "default": profile.profile_path,
- "help": "Path to profile configuration",
- },
- # Debugging
- "debug": {
- "flags": ["--debug", "-d"],
- "action": "store_true",
- "default": profile.debug,
- "help": "Run in debug mode",
- },
- }
-
-
-def parse_args():
- # Create profile with defaults
- profile = Profile()
- # Load from default location if it exists
- default_profile_path = os.path.expanduser(Profile.DEFAULT_PROFILE_PATH)
- if os.path.exists(default_profile_path):
- profile.load(Profile.DEFAULT_PROFILE_PATH)
-
- parser = argparse.ArgumentParser(add_help=False)
-
- # Hidden arguments
- parser.add_argument("--help", "-h", action="store_true", help=argparse.SUPPRESS)
- parser.add_argument("--version", action="store_true", help=argparse.SUPPRESS)
- parser.add_argument("--input", action="store", help=argparse.SUPPRESS)
- parser.add_argument(
- "--profiles", action="store_true", help="Open profiles directory"
- )
-
- # Add arguments programmatically from config
- arg_params = _profile_to_arg_params(profile)
- for param in arg_params.values():
- flags = param.pop("flags")
- parser.add_argument(*flags, **param)
-
- # If second argument exists and doesn't start with '-', treat as input message
- if len(sys.argv) > 1 and not sys.argv[1].startswith("-"):
- return {**vars(parser.parse_args([])), "input": "i " + " ".join(sys.argv[1:])}
-
- args = vars(parser.parse_args())
-
- # Handle profiles flag
- if args["profiles"]:
- profile_dir = os.path.expanduser(Profile.DEFAULT_PROFILE_FOLDER)
- if sys.platform == "win32":
- os.startfile(profile_dir)
- else:
- import subprocess
-
- opener = "open" if sys.platform == "darwin" else "xdg-open"
- subprocess.run([opener, profile_dir])
- sys.exit(0)
-
- # If a different profile is specified, load it
- if args["profile"] != profile.profile_path:
- profile.load(args["profile"])
- # Update any values that weren't explicitly set in CLI
- for key, value in vars(profile).items():
- if key in args and args[key] is None:
- args[key] = value
-
- return args
-
-
-def main():
- args = parse_args()
-
- def load_interpreter():
- global interpreter
- from .interpreter import Interpreter
-
- interpreter = Interpreter()
- # Configure interpreter from args
- for key, value in args.items():
- if hasattr(interpreter, key) and value is not None:
- setattr(interpreter, key, value)
-
- # Check if we should start the server
- if args["serve"]:
- # Load interpreter immediately for server mode
- load_interpreter()
- print("Starting server...")
- interpreter.server()
- return
-
- async def async_load():
- # Load interpreter in background
- with ThreadPoolExecutor() as pool:
- await asyncio.get_event_loop().run_in_executor(pool, load_interpreter)
-
- if args["input"] is None and sys.stdin.isatty():
- if sys.argv[0].endswith("interpreter"):
- from .misc.welcome import welcome_message
-
- welcome_message(args)
- print("\n> ", end="", flush=True)
- try:
- asyncio.run(async_load())
- message = input()
- except KeyboardInterrupt:
- return
- print()
- interpreter.messages = [{"role": "user", "content": message}]
- # Run the generator until completion
- for _ in interpreter.respond():
- pass
- print()
- interpreter.chat()
- else:
- print()
- spinner = SimpleSpinner("")
- spinner.start()
- load_interpreter()
- spinner.stop()
-
- if args["input"] is not None:
- message = args["input"]
- else:
- message = sys.stdin.read().strip()
- interpreter.messages = [{"role": "user", "content": message}]
-
- # Run the generator until completion
- for _ in interpreter.respond():
- pass
- print()
-
- if interpreter.interactive:
- interpreter.chat() # Continue in interactive mode
-
-
-if __name__ == "__main__":
- main()
diff --git a/interpreter_1/misc/get_input copy 2.py b/interpreter_1/misc/get_input copy 2.py
deleted file mode 100644
index 031507439b..0000000000
--- a/interpreter_1/misc/get_input copy 2.py
+++ /dev/null
@@ -1,113 +0,0 @@
-import asyncio
-import fcntl
-import os
-import random
-import sys
-import termios
-
-
-async def get_input(
- placeholder_text=None, placeholder_color: str = "gray", multiline_support=True
-) -> str:
- return input("> ")
- if placeholder_text is None:
- common_placeholders = [
- "How can I help you?",
- ]
- rare_placeholders = [
- 'Use """ for multi-line input',
- "Psst... try the wtf command",
- ]
- very_rare_placeholders = [""]
-
- # 69% common, 30% rare, 1% very rare
- rand = random.random()
- if rand < 0.69:
- placeholder_text = random.choice(common_placeholders)
- elif rand < 0.99:
- placeholder_text = random.choice(rare_placeholders)
- else:
- placeholder_text = random.choice(very_rare_placeholders)
-
- placeholder_text = "Describe command"
-
- # Save terminal settings and set raw mode
- old_settings = termios.tcgetattr(sys.stdin.fileno())
- tty_settings = termios.tcgetattr(sys.stdin.fileno())
- tty_settings[3] = tty_settings[3] & ~(termios.ECHO | termios.ICANON)
- termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, tty_settings)
-
- # Set up non-blocking stdin
- fd = sys.stdin.fileno()
- flags = fcntl.fcntl(fd, fcntl.F_GETFL)
- fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
-
- COLORS = {
- "gray": "\033[90m",
- "red": "\033[91m",
- "green": "\033[92m",
- "yellow": "\033[93m",
- "blue": "\033[94m",
- "magenta": "\033[95m",
- "cyan": "\033[96m",
- "white": "\033[97m",
- }
- RESET = "\033[0m"
-
- current_input = []
- show_placeholder = True
-
- def redraw():
- sys.stdout.write("\r\033[K") # Clear line
- if multiline_support:
- sys.stdout.write("\r> ")
- if current_input:
- sys.stdout.write("".join(current_input))
- elif show_placeholder:
- color_code = COLORS.get(placeholder_color.lower(), COLORS["gray"])
- sys.stdout.write(f"{color_code}{placeholder_text}{RESET}")
- if multiline_support:
- sys.stdout.write("\r> ")
- sys.stdout.flush()
-
- try:
- redraw()
- while True:
- try:
- char = os.read(fd, 1).decode()
-
- if char == "\n":
- if current_input:
- result = "".join(current_input)
- # Multiline support
- if multiline_support and result.startswith('"""'):
- while True:
- print()
- extra_input = await get_input(multiline_support=False)
- if extra_input.endswith('"""'):
- result += extra_input
- return result
- else:
- result += extra_input
- else:
- return result
- else:
- redraw()
- elif char == "\x7f": # Backspace
- if current_input:
- current_input.pop()
- if not current_input:
- show_placeholder = True
- elif char == "\x03": # Ctrl+C
- raise KeyboardInterrupt
- elif char and char.isprintable():
- current_input.append(char)
- show_placeholder = False
- redraw()
- except BlockingIOError:
- pass
-
- finally:
- termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, old_settings)
- fcntl.fcntl(fd, fcntl.F_SETFL, flags)
- print()
diff --git a/interpreter_1/misc/get_input copy.py b/interpreter_1/misc/get_input copy.py
deleted file mode 100644
index 9dafdb53ea..0000000000
--- a/interpreter_1/misc/get_input copy.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import asyncio
-import fcntl
-import os
-import sys
-import termios
-
-
-async def get_input(
- placeholder_text: str = "Testing", placeholder_color: str = "gray"
-) -> str:
- # Save terminal settings and set raw mode
- old_settings = termios.tcgetattr(sys.stdin.fileno())
- tty_settings = termios.tcgetattr(sys.stdin.fileno())
- tty_settings[3] = tty_settings[3] & ~(termios.ECHO | termios.ICANON)
- termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, tty_settings)
-
- # Set up non-blocking stdin
- fd = sys.stdin.fileno()
- flags = fcntl.fcntl(fd, fcntl.F_GETFL)
- fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
-
- COLORS = {
- "gray": "\033[90m",
- "red": "\033[91m",
- "green": "\033[92m",
- "yellow": "\033[93m",
- "blue": "\033[94m",
- "magenta": "\033[95m",
- "cyan": "\033[96m",
- "white": "\033[97m",
- }
- RESET = "\033[0m"
-
- current_input = []
- show_placeholder = True
-
- def redraw():
- sys.stdout.write("\r\033[K") # Clear line
- sys.stdout.write("\r> ")
- if current_input:
- sys.stdout.write("".join(current_input))
- elif show_placeholder:
- color_code = COLORS.get(placeholder_color.lower(), COLORS["gray"])
- sys.stdout.write(f"{color_code}{placeholder_text}{RESET}")
- sys.stdout.write("\r> ")
- sys.stdout.flush()
-
- try:
- redraw()
- while True:
- try:
- char = os.read(fd, 1).decode()
-
- if char == "\n":
- if current_input:
- result = "".join(current_input)
- return result
- else:
- redraw()
- elif char == "\x7f": # Backspace
- if current_input:
- current_input.pop()
- if not current_input:
- show_placeholder = True
- elif char == "\x03": # Ctrl+C
- raise KeyboardInterrupt
- elif char and char.isprintable():
- current_input.append(char)
- show_placeholder = False
- redraw()
- except BlockingIOError:
- pass
-
- finally:
- termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, old_settings)
- fcntl.fcntl(fd, fcntl.F_SETFL, flags)
- print()
diff --git a/interpreter_1/misc/user_input copy.py b/interpreter_1/misc/user_input copy.py
deleted file mode 100644
index f649bb348c..0000000000
--- a/interpreter_1/misc/user_input copy.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# from prompt_toolkit import PromptSession
-# from prompt_toolkit.formatted_text import HTML
-# import os
-
-
-def get_user_input(
- placeholder_text: str = "", placeholder_color: str = "ansigray", prompt_session=None
-) -> str:
- """
- Get user input with support for multi-line input and fallback to standard input.
-
- Args:
- placeholder_text: Text to show as placeholder
- placeholder_color: Color of the placeholder text
- prompt_session: Optional PromptSession instance to use
-
- Returns:
- The user's input as a string
- """
- return input("> ")
- # Create placeholder HTML
- placeholder = HTML(f"<{placeholder_color}>{placeholder_text}</{placeholder_color}>")
-
- # Use provided prompt session or create new one
- if prompt_session is None:
- prompt_session = PromptSession()
-
- try:
- # Prompt toolkit requires terminal size to work properly
- # If this fails, prompt toolkit will look weird, so we fall back to standard input
- os.get_terminal_size()
- user_input = prompt_session.prompt(
- "> ",
- placeholder=placeholder,
- ).strip()
- except KeyboardInterrupt:
- raise
- except:
- user_input = input("> ").strip()
- print()
-
- # Handle multi-line input
- if user_input == '"""':
- user_input = ""
- while True:
- placeholder = HTML(
- f'<{placeholder_color}>Use """ again to finish</{placeholder_color}>'
- )
- line = prompt_session.prompt("", placeholder=placeholder).strip()
- if line == '"""':
- break
- user_input += line + "\n"
- print()
-
- return user_input
diff --git a/interpreter_1/misc/welcome copy.py b/interpreter_1/misc/welcome copy.py
deleted file mode 100644
index fbd041c1af..0000000000
--- a/interpreter_1/misc/welcome copy.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import os
-import random
-import time
-
-from ..ui.markdown import MarkdownRenderer
-from .stream_text import stream_text
-
-
-def welcome_message(args):
- print()
- renderer = MarkdownRenderer()
-
- import random
-
- tips = [
- # "You can type `i` in your terminal to use Open Interpreter.",
- "**Tip:** Type `wtf` in your terminal to instantly fix the last error.",
- # "**Tip:** Type `wtf` in your terminal to have Open Interpreter fix the last error.",
- '**Tip:** You can paste content into Open Interpreter by typing `"""` first.',
- # "**Tip:** Type prompts after `i` in your terminal, for example, `i want deno`.",
- "**Tip:** You can type `i [your prompt]` directly into your terminal, e.g. `i want a venv`.", # \n\nThese are all valid commands: `i want deno`, `i dont understand`, `i want a venv`",
- # "**Tip:** Type your prompt directly into your CLI by starting with `i `, like `i want node`.", # \n\nThese are all valid commands: `i want deno`, `i dont understand`, `i want a venv`",
- # "Our desktop app provides the best experience. Type `d` for early access.",
- # "**Tip:** Reduce display resolution for better performance.",
- ]
-
- random_tip = random.choice(tips)
-
- model = args["model"]
-
- if model == "claude-3-5-sonnet-20241022":
- model = "CLAUDE-3.5-SONNET"
-
- model = f"` ✳ {model.upper()} `" # {"-" * (terminal_width - len(model))} # ⎇
-
- if args["tool_calling"] == False:
- args["tools"] = ["interpreter"]
-
- tool_displays = []
- for tool in ["interpreter", "editor", "gui"]:
- if args["tools"] and tool in args["tools"]:
- if tool == "interpreter":
- tool_displays.append("` ❯ INTERPRETER `")
- elif tool == "editor":
- tool_displays.append("` ❚ FILE EDITOR `")
- elif tool == "gui":
- tool_displays.append("` ✳ GUI CONTROL `")
- else:
- if tool == "interpreter":
- tool_displays.append(" " * len(" ❯ INTERPRETER "))
- elif tool == "editor":
- tool_displays.append(" " * len(" ❚ FILE EDITOR "))
- elif tool == "gui":
- tool_displays.append(" " * len(" ✳ GUI CONTROL "))
-
- # Sort tool_displays so that empty tools are at the end
- tool_displays = sorted(
- tool_displays, key=lambda x: x == " " * len(" ❯ INTERPRETER ")
- )
-
- auto_run_display = (
- "` ! AUTOMATIC (UNSAFE) `" if args["auto_run"] else "` ? REQUIRES PERMISSION `"
- )
-
- gap = 8
-
- markdown_text = f"""**MODEL**{" "*(len(model)-2+gap-len("MODEL"))}**TOOLS**
-{model}{" "*gap}{tool_displays[0]}
-**TOOL EXECUTION**{" "*(len(model)-2+gap-len("TOOL EXECUTION"))}{tool_displays[1]}
-{auto_run_display}{" "*(len(model)+gap-len(auto_run_display))}{tool_displays[2]}
-
-{random_tip}
-
-"""
-
- """
- **Warning:** This AI has full system access and can modify files, install software, and execute commands. By continuing, you accept all risks and responsibility.
-
- Move your mouse to any corner of the screen to exit.
- """
-
- # for chunk in stream_text(markdown_text, max_chunk=1, min_delay=0.0001, max_delay=0.001):
- # renderer.feed(chunk)
-
- renderer.feed(markdown_text)
-
- renderer.close()
-
-
-# ⧳ ❚ ❯ ✦ ⬤ ● ▶ ⚈ ⌖ ⎋ ⬤ ◉ ⎇
diff --git a/pyproject.toml b/pyproject.toml
index 83d70173a7..3cd1aaf9a2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,9 +3,8 @@ name = "open-interpreter"
packages = [
{include = "interpreter"},
{include = "scripts"},
- {include = "interpreter_1"},
]
-version = "0.4.4" # Use "-rc1", "-rc2", etc. for pre-release versions
+version = "1.0.0" # Use "-rc1", "-rc2", etc. for pre-release versions
description = "A natural language interface for computers"
authors = ["Killian Lucas "]
readme = "README.md"
@@ -32,14 +31,11 @@ requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
[tool.poetry.scripts]
-i = "interpreter_1.cli:main"
-interpreter = "interpreter_1.cli:main"
-
+i = "interpreter.cli:main"
+interpreter = "interpreter.cli:main"
interpreter-shell = "scripts.shell:main"
interpreter-uninstall-shell = "scripts.uninstall_shell:main"
-
wtf = "scripts.wtf:main"
-interpreter-classic = "interpreter.terminal_interface.start_terminal_interface:main"
[tool.black]
target-version = ['py311']
From c3658660d52f5cae3b26c8da299f40b0cb9e2cd1 Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Mon, 2 Dec 2024 09:58:32 -0800
Subject: [PATCH 59/91] New README draft
---
README.md | 432 +++++++-----------------------------------------------
1 file changed, 56 insertions(+), 376 deletions(-)
diff --git a/README.md b/README.md
index ad38450680..e43d7f4ed8 100644
--- a/README.md
+++ b/README.md
@@ -1,411 +1,91 @@
-● Open Interpreter
+# Open Interpreter
-
-
-
-
-
-
-
-
-
-
-
-Get early access to the desktop app | Documentation
-
+A modern command line assistant.
-
+[Documentation](https://docs.openinterpreter.com/) | [Discord](https://discord.gg/Hvz9Axh84z)
-
+## Install
-
-