Page cover

Example: Using the Python Wrapper for LLM Integration

import asyncio
from juliaos import JuliaOS
from juliaos.llm import OpenAIProvider, ClaudeProvider, MistralProvider
from juliaos.langchain import JuliaOSToolkit

async def use_llm_integration():
    """Demonstrate JuliaOS LLM integration end to end.

    Connects to a local JuliaOS server, runs an LLM-guided swarm
    optimization, then drives a LangChain agent built from the
    JuliaOS toolkit.

    Requires:
        - A JuliaOS server listening on localhost:8052.
        - Valid API keys in place of the placeholder strings below.
    """
    # Initialize JuliaOS and open the server connection.
    juliaos_client = JuliaOS(host="localhost", port=8052)
    await juliaos_client.connect()

    # Everything after connect() runs inside try/finally so the
    # connection is released even if an intermediate step raises.
    try:
        # Create an LLM provider (choose one; keys are placeholders).
        openai_llm = OpenAIProvider(api_key="your_openai_api_key", model="gpt-4")
        claude_llm = ClaudeProvider(api_key="your_anthropic_api_key", model="claude-3-opus-20240229")
        mistral_llm = MistralProvider(api_key="your_mistral_api_key", model="mistral-large-latest")

        # Use the OpenAI provider for this example
        llm = openai_llm

        # Create a swarm using the LLM for guidance
        swarm = await juliaos_client.swarms.create_swarm(
            name="LLM-Guided Swarm",
            algorithm="PSO",
            config={
                "llm_provider": "openai",
                "llm_guidance": True,
                "population_size": 30,
                "adaptive_parameters": True
            }
        )

        # Run an optimization with LLM guidance.
        # The objective is a Julia function string evaluated server-side.
        result = await juliaos_client.swarms.run_optimization(
            swarm_id=swarm["id"],
            objective_function="function(x) return sum(x.^2) end",
            parameters={
                "bounds": [(-10, 10), (-10, 10), (-10, 10)],
                "max_iterations": 100,
                "llm_feedback_frequency": 10  # Get LLM feedback every 10 iterations
            }
        )

        print(f"Best position: {result['best_position']}")
        print(f"Best fitness: {result['best_fitness']}")
        print(f"LLM guidance: {result['llm_guidance_summary']}")

        # Create a LangChain toolkit exposing JuliaOS operations as tools.
        toolkit = JuliaOSToolkit(juliaos_client)

        # Use the toolkit with LangChain
        from langchain.agents import initialize_agent, AgentType
        agent = initialize_agent(
            tools=toolkit.get_tools(),
            llm=llm.get_llm(),
            agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
            verbose=True
        )

        # Run the agent asynchronously with a natural-language task.
        response = await agent.arun("Create a trading agent and execute a trade on Uniswap.")
        print(f"Agent response: {response}")
    finally:
        # Always close the server connection, even on error.
        await juliaos_client.disconnect()

# Run the example only when executed as a script, not when imported.
if __name__ == "__main__":
    asyncio.run(use_llm_integration())

The Python wrapper provides comprehensive LLM integration capabilities:

  • Multiple LLM Providers: Support for OpenAI, Claude, Mistral, Cohere, Gemini, and local models

  • LangChain Integration: Full integration with LangChain for agent creation and tool usage

  • LLM-Guided Optimization: Use LLMs to guide swarm optimization algorithms

  • Agent Specialization: Create specialized agents with LLM-powered decision making

  • Natural Language Interfaces: Interact with JuliaOS using natural language

  • Hybrid Intelligence: Combine swarm intelligence with LLM capabilities

Last updated