-
-
Notifications
You must be signed in to change notification settings - Fork 476
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Kye
committed
Nov 6, 2023
1 parent
a70a2b0
commit 336bffe
Showing
8 changed files
with
120 additions
and
140 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
File renamed without changes.
This file was deleted.
Oops, something went wrong.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,6 +1,6 @@ | ||
from swarms.models.openai_models import OpenAIChat | ||
|
||
openai = OpenAIChat(openai_api_key="", verbose=False) | ||
openai = OpenAIChat(openai_api_key="<REDACTED-OPENAI-API-KEY>", verbose=False) | ||
|
||
chat = openai("Are quantum fields everywhere?") | ||
chat = openai("What are quantum fields?") | ||
print(chat) |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,35 @@ | ||
from swarms.models import OpenAIChat | ||
from swarms.structs import Flow | ||
|
||
api_key = "" | ||
|
||
# Initialize the language model; it can be swapped out for Anthropic models, Hugging Face models such as Mistral, etc. | ||
llm = OpenAIChat( | ||
# model_name="gpt-4" | ||
openai_api_key=api_key, | ||
temperature=0.5, | ||
# max_tokens=100, | ||
) | ||
|
||
## Initialize the workflow | ||
flow = Flow( | ||
llm=llm, | ||
max_loops=2, | ||
dashboard=True, | ||
# stopping_condition=None, # You can define a stopping condition as needed. | ||
# loop_interval=1, | ||
# retry_attempts=3, | ||
# retry_interval=1, | ||
# interactive=False, # Set to 'True' for interactive mode. | ||
# dynamic_temperature=False, # Set to 'True' for dynamic temperature handling. | ||
) | ||
|
||
# out = flow.load_state("flow_state.json") | ||
# temp = flow.dynamic_temperature() | ||
# filter = flow.add_response_filter("Trump") | ||
out = flow.run("Generate a 10,000 word blog on health and wellness.") | ||
# out = flow.validate_response(out) | ||
# out = flow.analyze_feedback(out) | ||
# out = flow.print_history_and_memory() | ||
# # out = flow.save_state("flow_state.json") | ||
# print(out) |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,31 @@ | ||
from swarms.models import OpenAIChat | ||
from swarms.structs import Flow | ||
from swarms.structs.sequential_workflow import SequentialWorkflow | ||
|
||
# Example usage | ||
llm = OpenAIChat( | ||
temperature=0.5, | ||
max_tokens=3000, | ||
) | ||
|
||
# Initialize the Flow with the language flow | ||
flow1 = Flow(llm=llm, max_loops=1, dashboard=False) | ||
|
||
# Create another Flow for a different task | ||
flow2 = Flow(llm=llm, max_loops=1, dashboard=False) | ||
|
||
# Create the workflow | ||
workflow = SequentialWorkflow(max_loops=1) | ||
|
||
# Add tasks to the workflow | ||
workflow.add("Generate a 10,000 word blog on health and wellness.", flow1) | ||
|
||
# Suppose the next task takes the output of the first task as input | ||
workflow.add("Summarize the generated blog", flow2) | ||
|
||
# Run the workflow | ||
workflow.run() | ||
|
||
# Output the results | ||
for task in workflow.tasks: | ||
print(f"Task: {task.description}, Result: {task.result}") |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,39 +1,16 @@ | ||
from swarms.swarms import GodMode | ||
from swarms.models import OpenAIChat | ||
|
||
from swarms.swarms import GodMode | ||
from swarms.workers.worker import Worker | ||
api_key = "" | ||
|
||
llm = OpenAIChat(openai_api_key=api_key) | ||
|
||
llm = OpenAIChat(model_name="gpt-4", openai_api_key="api-key", temperature=0.5) | ||
|
||
worker1 = Worker( | ||
llm=llm, | ||
ai_name="Bumble Bee", | ||
ai_role="Worker in a swarm", | ||
external_tools=None, | ||
human_in_the_loop=False, | ||
temperature=0.5, | ||
) | ||
worker2 = Worker( | ||
llm=llm, | ||
ai_name="Optimus Prime", | ||
ai_role="Worker in a swarm", | ||
external_tools=None, | ||
human_in_the_loop=False, | ||
temperature=0.5, | ||
) | ||
worker3 = Worker( | ||
llm=llm, | ||
ai_name="Megatron", | ||
ai_role="Worker in a swarm", | ||
external_tools=None, | ||
human_in_the_loop=False, | ||
temperature=0.5, | ||
) | ||
# Usage | ||
agents = [worker1, worker2, worker3] | ||
llms = [llm, llm, llm] | ||
|
||
god_mode = GodMode(agents) | ||
god_mode = GodMode(llms) | ||
|
||
task = "What are the biggest risks facing humanity?" | ||
task = "Generate a 10,000 word blog on health and wellness." | ||
|
||
out = god_mode.run(task) | ||
god_mode.print_responses(task) |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,61 +1,49 @@ | ||
from swarms.models import OpenAIChat | ||
from swarms.swarms import GroupChat, GroupChatManager | ||
from swarms.workers import Worker | ||
from swarms import OpenAI, Flow | ||
from swarms.swarms.groupchat import GroupChatManager, GroupChat | ||
|
||
llm = OpenAIChat(model_name="gpt-4", openai_api_key="api-key", temperature=0.5) | ||
|
||
node = Worker( | ||
llm=llm, | ||
ai_name="Optimus Prime", | ||
ai_role="Worker in a swarm", | ||
external_tools=None, | ||
human_in_the_loop=False, | ||
api_key = "" | ||
|
||
llm = OpenAI( | ||
openai_api_key=api_key, | ||
temperature=0.5, | ||
max_tokens=3000, | ||
) | ||
|
||
node2 = Worker( | ||
# Initialize the flow | ||
flow1 = Flow( | ||
llm=llm, | ||
ai_name="Optimus Prime", | ||
ai_role="Worker in a swarm", | ||
external_tools=None, | ||
human_in_the_loop=False, | ||
temperature=0.5, | ||
max_loops=1, | ||
system_message="YOU ARE SILLY, YOU OFFER NOTHING OF VALUE", | ||
name="silly", | ||
dashboard=True, | ||
) | ||
|
||
node3 = Worker( | ||
flow2 = Flow( | ||
llm=llm, | ||
ai_name="Optimus Prime", | ||
ai_role="Worker in a swarm", | ||
external_tools=None, | ||
human_in_the_loop=False, | ||
temperature=0.5, | ||
max_loops=1, | ||
system_message="YOU ARE VERY SMART AND ANSWER RIDDLES", | ||
name="detective", | ||
dashboard=True, | ||
) | ||
|
||
nodes = [node, node2, node3] | ||
|
||
messages = [ | ||
{ | ||
"role": "system", | ||
"context": "Create a small feedforward network in PyTorch", | ||
} | ||
] | ||
|
||
group = GroupChat( | ||
workers=nodes, | ||
messages=messages, | ||
max_rounds=3, | ||
flow3 = Flow( | ||
llm=llm, | ||
max_loops=1, | ||
system_message="YOU MAKE RIDDLES", | ||
name="riddler", | ||
dashboard=True, | ||
) | ||
|
||
|
||
manager = GroupChatManager( | ||
groupchat=group, | ||
max_consecutive_auto_reply=3, | ||
manager = Flow( | ||
llm=llm, | ||
max_loops=1, | ||
system_message="YOU ARE A GROUP CHAT MANAGER", | ||
name="manager", | ||
dashboard=True, | ||
) | ||
|
||
output = group.run( | ||
messages, | ||
sender=node, | ||
config=group, | ||
) | ||
|
||
print(output) | ||
# Example usage: | ||
agents = [flow1, flow2, flow3] | ||
|
||
group_chat = GroupChat(agents=agents, messages=[], max_round=10) | ||
chat_manager = GroupChatManager(groupchat=group_chat, selector=manager) | ||
chat_history = chat_manager("Write me a riddle") |