Commit 2d3e913

simple draft

WHALEEYE committed Oct 29, 2024
1 parent 3bc649b commit 2d3e913
Showing 4 changed files with 364 additions and 242 deletions.
151 changes: 63 additions & 88 deletions gui/main.py
@@ -12,102 +12,77 @@
# limitations under the License.
# =========== Copyright 2024 @ CAMEL-AI.org. All Rights Reserved. ===========
import warnings
+from pathlib import Path
+from uuid import uuid4

-from crab import (
-    BenchmarkConfig,
-    create_benchmark,
-)
-from crab.actions.crab_actions import complete
-from crab.actions.visual_prompt_actions import (
-    get_elements_prompt,
-    groundingdino_easyocr,
-)
-from crab.environments.macos import mac_env
-from gui.envs import ANDROID_ENV, UBUNTU_ENV, WINDOWS_ENV
-from gui.host_os import HostOS
+import customtkinter as ctk

+from crab import Experiment
+from crab.agents.backend_models import OpenAIModel
+from crab.agents.policies import SingleAgentPolicy
+from gui.utils import get_benchmark

warnings.filterwarnings("ignore")


-def check_host_os() -> HostOS:
-    return HostOS.WINDOWS


-def get_benchmark(env: str, ubuntu_url: str):
-    ubuntu_tool = {
-        "screenshot": groundingdino_easyocr(font_size=16) >> get_elements_prompt
-    }
-    android_tool = {
-        "screenshot": groundingdino_easyocr(font_size=40) >> get_elements_prompt
-    }
-    mac_tool = {
-        "screenshot": groundingdino_easyocr(font_size=24) >> get_elements_prompt
-    }
-
-    if env == "ubuntu":
-        prompting_tools = {"ubuntu": ubuntu_tool}
-        benchmark_config = BenchmarkConfig(
-            name="ubuntu_benchmark",
-            tasks=[],
-            environments=[UBUNTU_ENV],
-            prompting_tools=prompting_tools,
-            root_action_space=[complete],
-            multienv=True,
-        )
-    elif env == "android":
-        prompting_tools = {"android": android_tool}
-        benchmark_config = BenchmarkConfig(
-            name="android_benchmark",
-            tasks=[],
-            environments=[ANDROID_ENV],
-            prompting_tools=prompting_tools,
-            root_action_space=[complete],
-            multienv=True,
-        )
-    elif env == "cross":
-        prompting_tools = {
-            "android": android_tool,
-            "ubuntu": ubuntu_tool,
-        }
-        benchmark_config = BenchmarkConfig(
-            name="ubuntu_android_benchmark",
-            tasks=[],
-            environments=[UBUNTU_ENV, ANDROID_ENV],
-            prompting_tools=prompting_tools,
-            root_action_space=[complete],
-            multienv=True,
-        )
-    elif env == "mac":
-        prompting_tools = {"macos": mac_tool}
-        benchmark_config = BenchmarkConfig(
-            name="mac_benchmark",
-            tasks=[],
-            environments=[mac_env, ANDROID_ENV],
-            prompting_tools=prompting_tools,
-            root_action_space=[complete],
-            multienv=True,
-        )
-    elif env == "windows":
-        prompting_tools = {"windows": ubuntu_tool}
-        benchmark_config = BenchmarkConfig(
-            name="windows_benchmark",
-            tasks=[],
-            environments=[WINDOWS_ENV],
-            prompting_tools=prompting_tools,
-            root_action_space=[complete],
-            multienv=True,
-        )
-    else:
-        raise ValueError("Env not support")
+def assign_task():
+    task_description = input_entry.get()
+    input_entry.delete(0, "end")
+    display_message(task_description)

-    benchmark_config.step_limit = 15
-    return create_benchmark(benchmark_config)
+    task_id = str(uuid4())
+    benchmark = get_benchmark(task_id, task_description)
+    experiment = Experiment(
+        benchmark=benchmark,
+        task_id=task_id,
+        agent_policy=agent_policy,
+        log_dir=log_dir,
+    )
+    experiment.start_benchmark()


-def main():
-    host_os = check_host_os()
-    print(f"Host OS: {host_os}")
+def display_message(message, sender="user"):
+    chat_display.configure(state="normal")
+    if sender == "user":
+        chat_display.insert("end", f"User: {message}\n", "user")
+    else:
+        chat_display.insert("end", f"AI: {message}\n", "ai")
+    chat_display.tag_config("user", justify="left", foreground="blue")
+    chat_display.tag_config("ai", justify="right", foreground="green")
+    chat_display.configure(state="disabled")
+    chat_display.see("end")
+    app.update_idletasks()


-if __name__ == "__main__":
-    main()
+model = OpenAIModel(model="gpt-4o", history_messages_len=2)
+agent_policy = SingleAgentPolicy(model_backend=model)
+log_dir = (Path(__file__).parent / "logs").resolve()
+
+ctk.set_appearance_mode("System")
+ctk.set_default_color_theme("blue")
+
+app = ctk.CTk()
+app.title("CRAB")
+app.geometry("400x500")
+
+chat_display_frame = ctk.CTkFrame(app, width=380, height=380)
+chat_display_frame.pack(pady=10)
+chat_display = ctk.CTkTextbox(
+    chat_display_frame, width=380, height=380, state="disabled"
+)
+chat_display.pack()
+
+# Frame for input and send button side-by-side
+input_frame = ctk.CTkFrame(app)
+input_frame.pack(pady=10, padx=10, fill="x")
+
+# Entry widget for user input
+input_entry = ctk.CTkEntry(
+    input_frame, placeholder_text="Type your message here..."
+)
+input_entry.pack(side="left", fill="x", expand=True, padx=(0, 10))
+
+send_button = ctk.CTkButton(input_frame, text="Send", command=assign_task)
+send_button.pack(side="right")
+app.mainloop()
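
One caveat in the draft above: assign_task calls Experiment.start_benchmark() directly inside the Tkinter button callback, so the window freezes until the whole benchmark run finishes. A minimal sketch of one workaround, assuming nothing else in the file changes, is to push the run onto a background thread; assign_task_async is a hypothetical helper, not part of this commit, and widget updates made from the worker thread should ideally be marshalled back to the main loop with app.after:

    import threading

    def assign_task_async():
        # Keep the Tk event loop responsive while the benchmark runs.
        threading.Thread(target=assign_task, daemon=True).start()

    send_button = ctk.CTkButton(input_frame, text="Send", command=assign_task_async)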
110 changes: 110 additions & 0 deletions gui/utils.py
@@ -0,0 +1,110 @@
# =========== Copyright 2024 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2024 @ CAMEL-AI.org. All Rights Reserved. ===========

from crab import (
    Benchmark,
    BenchmarkConfig,
    Task,
    create_benchmark,
    evaluator,
)
from crab.actions.crab_actions import complete
from crab.actions.visual_prompt_actions import (
    get_elements_prompt,
    groundingdino_easyocr,
)
from gui.envs import MAC_ENV, UBUNTU_ENV, WINDOWS_ENV
from gui.host_os import HostOS


def check_host_os() -> HostOS:
    # TODO: Check the host OS and return the corresponding HostOS enum
    return HostOS.WINDOWS
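

# A possible way to fill in the TODO above (an illustrative sketch, not part of
# this commit), using only the standard library:
#
#     import platform
#
#     def check_host_os() -> HostOS:
#         system = platform.system()
#         if system == "Linux":
#             return HostOS.LINUX
#         if system == "Darwin":
#             return HostOS.MAC
#         if system == "Windows":
#             return HostOS.WINDOWS
#         raise ValueError(f"Unsupported host OS: {system}")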


# Placeholder evaluators: tasks created from the GUI have no automatic success
# check yet, so every environment's evaluator simply returns False.
@evaluator(env_name="ubuntu")
def empty_evaluator_linux() -> bool:
    return False


@evaluator(env_name="mac")
def empty_evaluator_mac() -> bool:
    return False


@evaluator(env_name="windows")
def empty_evaluator_windows() -> bool:
    return False

def get_benchmark(task_id: str, task_description: str) -> Benchmark:
    host_os = check_host_os()

    ubuntu_tool = {
        "screenshot": groundingdino_easyocr(font_size=16) >> get_elements_prompt
    }
    mac_tool = {
        "screenshot": groundingdino_easyocr(font_size=24) >> get_elements_prompt
    }

    if host_os == HostOS.LINUX:
        prompting_tools = {"ubuntu": ubuntu_tool}
        benchmark_config = BenchmarkConfig(
            name="ubuntu_benchmark",
            tasks=[
                Task(
                    id=task_id,
                    description=task_description,
                    evaluator=empty_evaluator_linux,
                )
            ],
            environments=[UBUNTU_ENV],
            prompting_tools=prompting_tools,
            root_action_space=[complete],
        )
    elif host_os == HostOS.MAC:
        prompting_tools = {"macos": mac_tool}
        benchmark_config = BenchmarkConfig(
            name="mac_benchmark",
            tasks=[
                Task(
                    id=task_id,
                    description=task_description,
                    evaluator=empty_evaluator_mac,
                )
            ],
            environments=[MAC_ENV],
            prompting_tools=prompting_tools,
            root_action_space=[complete],
        )
    elif host_os == HostOS.WINDOWS:
        # Windows currently reuses the Ubuntu screenshot prompting tool.
        prompting_tools = {"windows": ubuntu_tool}
        benchmark_config = BenchmarkConfig(
            name="windows_benchmark",
            tasks=[
                Task(
                    id=task_id,
                    description=task_description,
                    evaluator=empty_evaluator_windows,
                )
            ],
            environments=[WINDOWS_ENV],
            prompting_tools=prompting_tools,
            root_action_space=[complete],
        )
    else:
        raise ValueError(f"Host OS {host_os} is not supported")

    benchmark_config.step_limit = 15
    return create_benchmark(benchmark_config)
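
Both files import HostOS from gui/host_os.py, which is not shown in this commit. Judging from the members referenced above (LINUX, MAC, WINDOWS), it is presumably a small enum along these lines; the string values here are an assumption for illustration only:

    from enum import Enum


    class HostOS(Enum):
        LINUX = "linux"      # assumed value, not taken from the repository
        MAC = "mac"          # assumed value
        WINDOWS = "windows"  # assumed value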
