[2.9.0] Rename plugin to compound-engineering
BREAKING: Plugin renamed from compounding-engineering to compound-engineering. Users will need to reinstall with the new name: claude /plugin install compound-engineering Changes: - Renamed plugin directory and all references - Updated documentation counts (24 agents, 19 commands) - Added julik-frontend-races-reviewer to docs 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
@@ -0,0 +1,359 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
# DSPy.rb Configuration Examples
|
||||
# This file demonstrates various configuration patterns for different use cases
|
||||
|
||||
require 'dspy'
|
||||
|
||||
# ============================================================================
|
||||
# Basic Configuration
|
||||
# ============================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------
# Basic + Multi-Provider Configuration
# ----------------------------------------------------------------------------
# Each call below sets the process-wide default language model; the last
# `DSPy.configure` executed wins.

# OpenAI (smallest/cheapest hosted option shown here)
DSPy.configure do |c|
  c.lm = DSPy::LM.new('openai/gpt-4o-mini', api_key: ENV['OPENAI_API_KEY'])
end

# Anthropic Claude
DSPy.configure do |c|
  c.lm = DSPy::LM.new('anthropic/claude-3-5-sonnet-20241022', api_key: ENV['ANTHROPIC_API_KEY'])
end

# Google Gemini
DSPy.configure do |c|
  c.lm = DSPy::LM.new('gemini/gemini-1.5-pro', api_key: ENV['GOOGLE_API_KEY'])
end

# Local Ollama — no API key, just a local endpoint
DSPy.configure do |c|
  c.lm = DSPy::LM.new('ollama/llama3.1', base_url: 'http://localhost:11434')
end

# OpenRouter (access to 200+ models behind one API)
DSPy.configure do |c|
  c.lm = DSPy::LM.new('openrouter/anthropic/claude-3.5-sonnet',
                      api_key: ENV['OPENROUTER_API_KEY'],
                      base_url: 'https://openrouter.ai/api/v1')
end
|
||||
|
||||
# ============================================================================
|
||||
# Environment-Based Configuration
|
||||
# ============================================================================
|
||||
|
||||
# Pick a model per Rails environment: free/local in development, cheap in
# test, most capable in production.
if Rails.env.development?
  # Local Ollama — free and private while iterating.
  DSPy.configure { |c| c.lm = DSPy::LM.new('ollama/llama3.1') }
elsif Rails.env.test?
  # Inexpensive hosted model keeps test runs cheap.
  DSPy.configure do |c|
    c.lm = DSPy::LM.new('openai/gpt-4o-mini', api_key: ENV['OPENAI_API_KEY'])
  end
else
  # Production: strongest model.
  DSPy.configure do |c|
    c.lm = DSPy::LM.new('anthropic/claude-3-5-sonnet-20241022', api_key: ENV['ANTHROPIC_API_KEY'])
  end
end
|
||||
|
||||
# ============================================================================
|
||||
# Configuration with Custom Parameters
|
||||
# ============================================================================
|
||||
|
||||
# Global model with explicit sampling parameters spelled out.
DSPy.configure do |c|
  c.lm = DSPy::LM.new(
    'openai/gpt-4o',
    api_key: ENV['OPENAI_API_KEY'],
    temperature: 0.7,       # Creativity (0.0-2.0, default: 1.0)
    max_tokens: 2000,       # Maximum response length
    top_p: 0.9,             # Nucleus sampling
    frequency_penalty: 0.0, # Reduce repetition (-2.0 to 2.0)
    presence_penalty: 0.0   # Encourage new topics (-2.0 to 2.0)
  )
end
|
||||
|
||||
# ============================================================================
|
||||
# Multiple Model Configuration (Task-Specific)
|
||||
# ============================================================================
|
||||
|
||||
# Create different language models for different tasks
|
||||
# Task-specific language models, each tuned for a different workload.
module MyApp
  # Cheap and quick; low temperature for near-deterministic answers.
  FAST_LM = DSPy::LM.new('openai/gpt-4o-mini',
                         api_key: ENV['OPENAI_API_KEY'],
                         temperature: 0.3)

  # Stronger model reserved for complex tasks.
  POWERFUL_LM = DSPy::LM.new('anthropic/claude-3-5-sonnet-20241022',
                             api_key: ENV['ANTHROPIC_API_KEY'],
                             temperature: 0.7)

  # High temperature + top_p for content generation.
  CREATIVE_LM = DSPy::LM.new('openai/gpt-4o',
                             api_key: ENV['OPENAI_API_KEY'],
                             temperature: 1.2,
                             top_p: 0.95)

  # gpt-4o also accepts image inputs.
  VISION_LM = DSPy::LM.new('openai/gpt-4o', api_key: ENV['OPENAI_API_KEY'])
end
|
||||
|
||||
# Use in modules
|
||||
# Classifier backed by the fast model.
# NOTE(review): DSPy.configure here mutates *global* configuration —
# instantiating this class switches the LM for the whole process.
class SimpleClassifier < DSPy::Module
  def initialize
    super
    DSPy.configure { |c| c.lm = MyApp::FAST_LM }
    @predictor = DSPy::Predict.new(SimpleSignature)
  end
end
|
||||
|
||||
# Analyzer backed by the powerful model, with Chain of Thought reasoning.
# NOTE(review): as with SimpleClassifier, this rebinds the global LM on
# construction rather than scoping it to the instance.
class ComplexAnalyzer < DSPy::Module
  def initialize
    super
    DSPy.configure { |c| c.lm = MyApp::POWERFUL_LM }
    @predictor = DSPy::ChainOfThought.new(ComplexSignature)
  end
end
|
||||
|
||||
# ============================================================================
|
||||
# Configuration with Observability (OpenTelemetry)
|
||||
# ============================================================================
|
||||
|
||||
require 'opentelemetry/sdk'

# Set up the OpenTelemetry SDK; per the original notes, DSPy integrates with
# it automatically once configured — no extra DSPy tracing options needed.
OpenTelemetry::SDK.configure do |c|
  c.service_name = 'my-dspy-app'
  c.use_all
end

DSPy.configure do |c|
  c.lm = DSPy::LM.new('openai/gpt-4o-mini', api_key: ENV['OPENAI_API_KEY'])
end
|
||||
|
||||
# ============================================================================
|
||||
# Configuration with Langfuse Tracing
|
||||
# ============================================================================
|
||||
|
||||
require 'dspy/langfuse'

DSPy.configure do |c|
  c.lm = DSPy::LM.new('openai/gpt-4o-mini', api_key: ENV['OPENAI_API_KEY'])

  # Langfuse tracing credentials; host falls back to the hosted cloud.
  c.langfuse = {
    public_key: ENV['LANGFUSE_PUBLIC_KEY'],
    secret_key: ENV['LANGFUSE_SECRET_KEY'],
    host: ENV['LANGFUSE_HOST'] || 'https://cloud.langfuse.com'
  }
end
|
||||
|
||||
# ============================================================================
|
||||
# Configuration with Retry Logic
|
||||
# ============================================================================
|
||||
|
||||
# Wraps the configured LM with bounded, exponentially backed-off retries.
class RetryableConfig
  MAX_RETRIES = 3

  # Install a retry-wrapped LM as the global DSPy language model.
  def self.configure
    DSPy.configure { |c| c.lm = create_lm_with_retry }
  end

  # Build the base LM and mix in the retry behaviour.
  def self.create_lm_with_retry
    lm = DSPy::LM.new('openai/gpt-4o-mini', api_key: ENV['OPENAI_API_KEY'])
    lm.extend(RetryBehavior)
    lm
  end

  # Retries transient failures with exponential backoff.
  # NOTE(review): RateLimitError is not defined anywhere in this file —
  # presumably supplied by a provider adapter; confirm before relying on it.
  module RetryBehavior
    def forward(input, retry_count: 0)
      super(input)
    rescue RateLimitError, TimeoutError
      raise unless retry_count < MAX_RETRIES

      sleep(2 ** retry_count) # Exponential backoff
      forward(input, retry_count: retry_count + 1)
    end
  end
end
|
||||
|
||||
RetryableConfig.configure
|
||||
|
||||
# ============================================================================
|
||||
# Configuration with Fallback Models
|
||||
# ============================================================================
|
||||
|
||||
# Configures DSPy with a primary model and an automatic fallback.
class FallbackConfig
  def self.configure
    DSPy.configure { |c| c.lm = create_lm_with_fallback }
  end

  # Primary: Anthropic Claude; fallback: OpenAI GPT-4o.
  def self.create_lm_with_fallback
    primary = DSPy::LM.new('anthropic/claude-3-5-sonnet-20241022',
                           api_key: ENV['ANTHROPIC_API_KEY'])
    fallback = DSPy::LM.new('openai/gpt-4o', api_key: ENV['OPENAI_API_KEY'])
    FallbackLM.new(primary, fallback)
  end

  # Minimal delegator: try the primary model; on *any* error, log and retry
  # the same input on the fallback model.
  class FallbackLM
    def initialize(primary, fallback)
      @primary = primary
      @fallback = fallback
    end

    def forward(input)
      @primary.forward(input)
    rescue => e
      puts "Primary model failed: #{e.message}. Falling back..."
      @fallback.forward(input)
    end
  end
end
|
||||
|
||||
FallbackConfig.configure
|
||||
|
||||
# ============================================================================
|
||||
# Configuration with Budget Tracking
|
||||
# ============================================================================
|
||||
|
||||
# Configures DSPy with an LM wrapped in a simple spending guard.
class BudgetTrackedConfig
  # Install a budget-tracked GPT-4o as the global DSPy model.
  #
  # @param monthly_budget_usd [Numeric] hard cap; forward raises once exceeded
  def self.configure(monthly_budget_usd:)
    DSPy.configure do |c|
      c.lm = BudgetTracker.new(
        DSPy::LM.new('openai/gpt-4o', api_key: ENV['OPENAI_API_KEY']),
        monthly_budget_usd: monthly_budget_usd
      )
    end
  end

  # Decorator around an LM that accumulates an estimated cost per call and
  # raises once the configured budget is exceeded.
  #
  # NOTE(review): the accumulator never resets — despite the name it tracks
  # lifetime cost of this process, not a calendar month.
  class BudgetTracker
    def initialize(lm, monthly_budget_usd:)
      @lm = lm
      @monthly_budget_usd = monthly_budget_usd
      @monthly_cost = 0.0
    end

    # Forward to the wrapped LM, then record estimated cost.
    # @raise [RuntimeError] when cumulative cost exceeds the budget
    def forward(input)
      result = @lm.forward(input)

      # Fix: use dig with a default so responses without usage metadata
      # (some providers omit it) don't crash with NoMethodError.
      tokens = result.metadata.dig(:usage, :total_tokens) || 0
      @monthly_cost += estimate_cost(tokens)

      if @monthly_cost > @monthly_budget_usd
        raise "Monthly budget of $#{@monthly_budget_usd} exceeded!"
      end

      result
    end

    private

    # Flat-rate estimate; real pricing varies per model — check provider docs.
    def estimate_cost(tokens)
      (tokens / 1_000_000.0) * 5.0 # $5 per 1M tokens
    end
  end
end
|
||||
|
||||
BudgetTrackedConfig.configure(monthly_budget_usd: 100)
|
||||
|
||||
# ============================================================================
|
||||
# Configuration Initializer for Rails
|
||||
# ============================================================================
|
||||
|
||||
# Save this as config/initializers/dspy.rb
|
||||
#
|
||||
# require 'dspy'
|
||||
#
|
||||
# DSPy.configure do |c|
|
||||
# # Environment-specific configuration
|
||||
# model_config = case Rails.env.to_sym
|
||||
# when :development
|
||||
# { provider: 'ollama', model: 'llama3.1' }
|
||||
# when :test
|
||||
# { provider: 'openai', model: 'gpt-4o-mini', temperature: 0.0 }
|
||||
# when :production
|
||||
# { provider: 'anthropic', model: 'claude-3-5-sonnet-20241022' }
|
||||
# end
|
||||
#
|
||||
# # Configure language model
|
||||
# c.lm = DSPy::LM.new(
|
||||
# "#{model_config[:provider]}/#{model_config[:model]}",
|
||||
# api_key: ENV["#{model_config[:provider].upcase}_API_KEY"],
|
||||
# **model_config.except(:provider, :model)
|
||||
# )
|
||||
#
|
||||
# # Optional: Add observability
|
||||
# if Rails.env.production?
|
||||
# c.langfuse = {
|
||||
# public_key: ENV['LANGFUSE_PUBLIC_KEY'],
|
||||
# secret_key: ENV['LANGFUSE_SECRET_KEY']
|
||||
# }
|
||||
# end
|
||||
# end
|
||||
|
||||
# ============================================================================
|
||||
# Testing Configuration
|
||||
# ============================================================================
|
||||
|
||||
# In spec/spec_helper.rb or test/test_helper.rb
|
||||
#
|
||||
# RSpec.configure do |config|
|
||||
# config.before(:suite) do
|
||||
# DSPy.configure do |c|
|
||||
# c.lm = DSPy::LM.new('openai/gpt-4o-mini',
|
||||
# api_key: ENV['OPENAI_API_KEY'],
|
||||
# temperature: 0.0 # Deterministic for testing
|
||||
# )
|
||||
# end
|
||||
# end
|
||||
# end
|
||||
|
||||
# ============================================================================
|
||||
# Configuration Best Practices
|
||||
# ============================================================================
|
||||
|
||||
# 1. Use environment variables for API keys (never hardcode)
|
||||
# 2. Use different models for different environments
|
||||
# 3. Use cheaper/faster models for development and testing
|
||||
# 4. Configure temperature based on use case:
|
||||
# - 0.0-0.3: Deterministic, factual tasks
|
||||
# - 0.7-1.0: Balanced creativity
|
||||
# - 1.0-2.0: High creativity, content generation
|
||||
# 5. Add observability in production (OpenTelemetry, Langfuse)
|
||||
# 6. Implement retry logic and fallbacks for reliability
|
||||
# 7. Track costs and set budgets for production
|
||||
# 8. Use max_tokens to control response length and costs
|
||||
@@ -0,0 +1,326 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
# Example DSPy Module Template
|
||||
# This template demonstrates best practices for creating composable modules
|
||||
|
||||
# Basic module with single predictor
|
||||
# Minimal DSPy module: one predictor driven by one signature.
class BasicModule < DSPy::Module
  def initialize
    super
    # A single Predict instance, reused across calls.
    @predictor = DSPy::Predict.new(ExampleSignature)
  end

  # Delegate straight to the predictor.
  #
  # @param input_hash [Hash] data matching ExampleSignature's input fields
  def forward(input_hash)
    @predictor.forward(input_hash)
  end
end
|
||||
|
||||
# Module with Chain of Thought reasoning
|
||||
# Email triage with explicit reasoning via Chain of Thought.
class ChainOfThoughtModule < DSPy::Module
  def initialize
    super
    # ChainOfThought augments the signature's output with a :reasoning field.
    @predictor = DSPy::ChainOfThought.new(EmailClassificationSignature)
  end

  # Classify an email; returns category, priority, reasoning and confidence.
  def forward(email_subject:, email_body:)
    prediction = @predictor.forward(
      email_subject: email_subject,
      email_body: email_body
    )

    {
      category: prediction[:category],
      priority: prediction[:priority],
      reasoning: prediction[:reasoning],
      confidence: calculate_confidence(prediction)
    }
  end

  private

  # Use the model-supplied confidence when present, otherwise a fixed default.
  def calculate_confidence(prediction)
    prediction[:confidence] || 0.8
  end
end
|
||||
|
||||
# Composable module that chains multiple steps
|
||||
# Three-stage pipeline: each predictor consumes the previous stage's output.
class MultiStepPipeline < DSPy::Module
  def initialize
    super
    @step1 = DSPy::Predict.new(Step1Signature)
    @step2 = DSPy::ChainOfThought.new(Step2Signature)
    @step3 = DSPy::Predict.new(Step3Signature)
  end

  # Run all three stages in order and expose every intermediate result.
  def forward(input)
    first  = @step1.forward(input)
    second = @step2.forward(first)
    third  = @step3.forward(second)

    { step1_output: first, step2_output: second, final_result: third }
  end
end
|
||||
|
||||
# Module with conditional logic
|
||||
# Routes short inputs to a cheap classifier and long inputs to a deeper analyzer.
class ConditionalModule < DSPy::Module
  def initialize
    super
    @simple_classifier = DSPy::Predict.new(SimpleClassificationSignature)
    @complex_analyzer = DSPy::ChainOfThought.new(ComplexAnalysisSignature)
  end

  # @param text [String] input to classify/analyze
  # @param complexity_threshold [Integer] character-length cutoff for the cheap path
  def forward(text:, complexity_threshold: 100)
    predictor = text.length < complexity_threshold ? @simple_classifier : @complex_analyzer
    predictor.forward(text: text)
  end
end
|
||||
|
||||
# Module with error handling and retry logic
|
||||
require 'logger' # Fix: Logger is stdlib but must be required explicitly

# Predictor wrapper with logging, result validation, and bounded retries.
class RobustModule < DSPy::Module
  MAX_RETRIES = 3

  def initialize
    super
    @predictor = DSPy::Predict.new(RobustSignature)
    @logger = Logger.new(STDOUT)
  end

  # Run the predictor; on validation failure retry with exponential backoff,
  # re-raising once MAX_RETRIES is exhausted.
  def forward(input, retry_count: 0)
    @logger.info "Processing input: #{input.inspect}"

    begin
      result = @predictor.forward(input)
      validate_result!(result)
      result
    rescue DSPy::ValidationError => e
      @logger.error "Validation error: #{e.message}"

      if retry_count < MAX_RETRIES
        @logger.info "Retrying (#{retry_count + 1}/#{MAX_RETRIES})..."
        sleep(2 ** retry_count) # Exponential backoff
        forward(input, retry_count: retry_count + 1)
      else
        @logger.error "Max retries exceeded"
        raise
      end
    end
  end

  private

  # Reject results missing a category or carrying confidence below 0.5.
  # @raise [DSPy::ValidationError]
  def validate_result!(result)
    raise DSPy::ValidationError, "Invalid result" unless result[:category]
    raise DSPy::ValidationError, "Low confidence" if result[:confidence] && result[:confidence] < 0.5
  end
end
|
||||
|
||||
# Module with ReAct agent and tools
|
||||
# ReAct agent wired with a small toolbox; the agent decides which tools to use.
class AgentModule < DSPy::Module
  def initialize
    super

    toolbox = [SearchTool.new, CalculatorTool.new, DatabaseQueryTool.new]

    # Iterative reason/act loop, bounded to 5 iterations.
    @agent = DSPy::ReAct.new(AgentSignature, tools: toolbox, max_iterations: 5)
  end

  # Hand the task to the agent; it works autonomously until done or capped.
  def forward(task:)
    @agent.forward(task: task)
  end
end
|
||||
|
||||
# Tool definition example
|
||||
# Example tool: exposes search to a ReAct agent via #call.
class SearchTool < DSPy::Tool
  # @param query [String] search terms supplied by the agent
  # @return [Hash] { results: Array } wrapper around the raw hits
  def call(query:)
    { results: perform_search(query) }
  end

  private

  # Placeholder implementation — swap in a real API or database lookup.
  def perform_search(query)
    ["result1", "result2", "result3"]
  end
end
|
||||
|
||||
# Module with state management
|
||||
# Keeps a rolling interaction history and feeds recent context back in.
class StatefulModule < DSPy::Module
  attr_reader :history

  def initialize
    super
    @predictor = DSPy::ChainOfThought.new(StatefulSignature)
    @history = []
  end

  # Predict with context distilled from recent history, then record the turn.
  def forward(input)
    prediction = @predictor.forward(
      input: input,
      context: build_context_from_history
    )

    @history << { input: input, result: prediction, timestamp: Time.now }
    prediction
  end

  # Drop all accumulated history.
  def reset!
    @history.clear
  end

  private

  # Join the :summary of the last five results into one context string.
  # NOTE(review): assumes every stored result exposes [:summary] — confirm
  # StatefulSignature always yields one, otherwise this emits blanks/raises.
  def build_context_from_history
    @history.last(5).map { |entry| entry[:result][:summary] }.join("\n")
  end
end
|
||||
|
||||
# Module that uses different LLMs for different tasks
|
||||
# Routes between a cheap and a powerful model at call time.
class MultiModelModule < DSPy::Module
  def initialize
    super

    # gpt-4o-mini for quick, inexpensive classification.
    @fast_predictor = create_predictor(
      'openai/gpt-4o-mini',
      SimpleClassificationSignature
    )

    # Claude Sonnet for heavier analysis.
    @powerful_predictor = create_predictor(
      'anthropic/claude-3-5-sonnet-20241022',
      ComplexAnalysisSignature
    )
  end

  # @param use_complex [Boolean] route to the powerful model when true
  def forward(input, use_complex: false)
    (use_complex ? @powerful_predictor : @fast_predictor).forward(input)
  end

  private

  # Derives the API-key env var from the provider prefix,
  # e.g. 'openai/...' -> ENV['OPENAI_API_KEY'].
  def create_predictor(model, signature)
    provider_key = "#{model.split('/').first.upcase}_API_KEY"
    lm = DSPy::LM.new(model, api_key: ENV[provider_key])
    DSPy::Predict.new(signature, lm: lm)
  end
end
|
||||
|
||||
# Module with caching
|
||||
require 'digest' # Fix: Digest::MD5 was used without requiring the stdlib

# Memoizes predictor results keyed by a digest of the input.
class CachedModule < DSPy::Module
  def initialize
    super
    @predictor = DSPy::Predict.new(CachedSignature)
    @cache = {}
  end

  # Return the cached result for identical input, otherwise compute and store.
  def forward(input)
    cache_key = create_cache_key(input)

    if @cache.key?(cache_key)
      puts "Cache hit for #{cache_key}"
      return @cache[cache_key]
    end

    # Compute, cache, and return (assignment evaluates to the stored result).
    @cache[cache_key] = @predictor.forward(input)
  end

  # Empty the cache.
  def clear_cache!
    @cache.clear
  end

  private

  # Deterministic key from the input's string form.
  # NOTE(review): relies on a stable #to_s — equal hashes with different
  # insertion order stringify differently and would miss the cache.
  def create_cache_key(input)
    Digest::MD5.hexdigest(input.to_s)
  end
end
|
||||
|
||||
# Usage Examples:
|
||||
#
|
||||
# Basic usage:
|
||||
# module = BasicModule.new
|
||||
# result = module.forward(field_name: "value")
|
||||
#
|
||||
# Chain of Thought:
|
||||
# module = ChainOfThoughtModule.new
|
||||
# result = module.forward(
|
||||
# email_subject: "Can't log in",
|
||||
# email_body: "I'm unable to access my account"
|
||||
# )
|
||||
# puts result[:reasoning]
|
||||
#
|
||||
# Multi-step pipeline:
|
||||
# pipeline = MultiStepPipeline.new
|
||||
# result = pipeline.forward(input_data)
|
||||
#
|
||||
# With error handling:
|
||||
# module = RobustModule.new
|
||||
# begin
|
||||
# result = module.forward(input_data)
|
||||
# rescue DSPy::ValidationError => e
|
||||
# puts "Failed after retries: #{e.message}"
|
||||
# end
|
||||
#
|
||||
# Agent with tools:
|
||||
# agent = AgentModule.new
|
||||
# result = agent.forward(task: "Find the population of Tokyo")
|
||||
#
|
||||
# Stateful processing:
|
||||
# module = StatefulModule.new
|
||||
# result1 = module.forward("First input")
|
||||
# result2 = module.forward("Second input") # Has context from first
|
||||
# module.reset! # Clear history
|
||||
#
|
||||
# With caching:
|
||||
# module = CachedModule.new
|
||||
# result1 = module.forward(input) # Computes result
|
||||
# result2 = module.forward(input) # Returns cached result
|
||||
@@ -0,0 +1,143 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
# Example DSPy Signature Template
|
||||
# This template demonstrates best practices for creating type-safe signatures
|
||||
|
||||
# Template signature showing the common field types DSPy.rb supports.
# Write descriptions that are specific — e.g. "Classify customer support
# emails into Technical, Billing, or General categories", not "Classify emails".
class ExampleSignature < DSPy::Signature
  description "Describe what this signature accomplishes and what output it produces"

  # What the LLM receives.
  input do
    const :field_name, String, desc: "Clear description of this input field"

    # Numeric inputs.
    const :count, Integer, desc: "Number of items to process"
    const :score, Float, desc: "Confidence score between 0.0 and 1.0"

    # Boolean input.
    const :is_active, T::Boolean, desc: "Whether the item is currently active"

    # Array input.
    const :tags, T::Array[String], desc: "List of tags associated with the item"

    # Constrained values via enum.
    const :priority, T.enum(%w[Low Medium High]), desc: "Priority level"
  end

  # What the LLM produces.
  output do
    const :result, String, desc: "The main result of the operation"

    # Classification constrained to a fixed set.
    const :category, T.enum(%w[Technical Billing General]),
          desc: "Category classification - must be one of: Technical, Billing, General"

    const :confidence, Float, desc: "Confidence score (0.0-1.0) for this classification"

    # ChainOfThought adds a :reasoning output automatically; declare it
    # explicitly only if you need it with plain Predict:
    # const :reasoning, String, desc: "Step-by-step reasoning for the classification"
  end
end
|
||||
|
||||
# Example with multimodal input (vision)
|
||||
# Multimodal signature: pairs an image with a free-text question.
class VisionExampleSignature < DSPy::Signature
  description "Analyze an image and answer questions about its content"

  input do
    const :image, DSPy::Image, desc: "The image to analyze"
    const :question, String, desc: "Question about the image content"
  end

  output do
    const :answer, String, desc: "Detailed answer to the question about the image"
    const :confidence, Float, desc: "Confidence in the answer (0.0-1.0)"
  end
end
|
||||
|
||||
# Example for complex analysis task
|
||||
# Sentiment analysis with emotion detection and intensity grading.
class SentimentAnalysisSignature < DSPy::Signature
  description "Analyze the sentiment of text with nuanced emotion detection"

  input do
    const :text, String, desc: "The text to analyze for sentiment"
    const :context, String, desc: "Additional context about the text source or situation"
  end

  output do
    # Overall polarity, constrained to four labels.
    const :sentiment, T.enum(%w[Positive Negative Neutral Mixed]),
          desc: "Overall sentiment - must be Positive, Negative, Neutral, or Mixed"

    const :emotions, T::Array[String],
          desc: "List of specific emotions detected (e.g., joy, anger, sadness, fear)"

    const :intensity, T.enum(%w[Low Medium High]),
          desc: "Intensity of the detected sentiment"

    const :confidence, Float,
          desc: "Confidence in the sentiment classification (0.0-1.0)"
  end
end
|
||||
|
||||
# Example for code generation task
|
||||
# Natural-language-to-Ruby code generation.
class CodeGenerationSignature < DSPy::Signature
  description "Generate Ruby code based on natural language requirements"

  input do
    const :requirements, String,
          desc: "Natural language description of what the code should do"

    const :constraints, String,
          desc: "Any specific requirements or constraints (e.g., libraries to use, style preferences)"
  end

  output do
    const :code, String,
          desc: "Complete, working Ruby code that fulfills the requirements"

    const :explanation, String,
          desc: "Brief explanation of how the code works and any important design decisions"

    const :dependencies, T::Array[String],
          desc: "List of required gems or dependencies"
  end
end
|
||||
|
||||
# Usage Examples:
|
||||
#
|
||||
# Basic usage with Predict:
|
||||
# predictor = DSPy::Predict.new(ExampleSignature)
|
||||
# result = predictor.forward(
|
||||
# field_name: "example value",
|
||||
# count: 5,
|
||||
# score: 0.85,
|
||||
# is_active: true,
|
||||
# tags: ["tag1", "tag2"],
|
||||
# priority: "High"
|
||||
# )
|
||||
# puts result[:result]
|
||||
# puts result[:category]
|
||||
# puts result[:confidence]
|
||||
#
|
||||
# With Chain of Thought reasoning:
|
||||
# predictor = DSPy::ChainOfThought.new(SentimentAnalysisSignature)
|
||||
# result = predictor.forward(
|
||||
# text: "I absolutely love this product! It exceeded all my expectations.",
|
||||
# context: "Product review on e-commerce site"
|
||||
# )
|
||||
# puts result[:reasoning] # See the LLM's step-by-step thinking
|
||||
# puts result[:sentiment]
|
||||
# puts result[:emotions]
|
||||
#
|
||||
# With Vision:
|
||||
# predictor = DSPy::Predict.new(VisionExampleSignature)
|
||||
# result = predictor.forward(
|
||||
# image: DSPy::Image.from_file("path/to/image.jpg"),
|
||||
# question: "What objects are visible in this image?"
|
||||
# )
|
||||
# puts result[:answer]
|
||||
Reference in New Issue
Block a user