
A Coding Implementation to Build a Self-Adaptive Goal-Oriented AI Agent Using Google Gemini and the SAGE Framework

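The class listing below references a TaskStatus enum and several standard imports that are not shown in this excerpt. The setup block that follows is a minimal reconstruction based on the names the code actually uses; the enum's string values (and the comment hints) are assumptions, not part of the original article.

import json
import time
from dataclasses import dataclass, asdict
from enum import Enum
from typing import Any, Dict, List, Optional

import google.generativeai as genai  # pip install google-generativeai


class TaskStatus(Enum):
    # Member names are taken from the listing below; the string values are assumed.
    PENDING = "pending"
    IN_PROGRESS = "in_progress"
    COMPLETED = "completed"
    FAILED = "failed"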


@dataclass
class Task:
    id: str
    description: str
    priority: int
    status: TaskStatus = TaskStatus.PENDING
    dependencies: List[str] = None
    result: Optional[str] = None

    def __post_init__(self):
        # Default to an empty dependency list so callers can omit it.
        if self.dependencies is None:
            self.dependencies = []


class SAGEAgent:
    """Self-Adaptive Goal-oriented Execution (SAGE) AI Agent"""

    def __init__(self, api_key: str, model_name: str = "gemini-1.5-flash"):
        genai.configure(api_key=api_key)
        self.model = genai.GenerativeModel(model_name)
        self.memory = []           # accumulated experience records
        self.tasks = {}            # executed Task objects keyed by id
        self.context = {}          # rolling execution context
        self.iteration_count = 0
      
    def self_assess(self, goal: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """S: Self-Assessment - Evaluate current state and capabilities"""
        assessment_prompt = f"""
        You are an AI agent conducting self-assessment. Respond ONLY with valid JSON, no extra text.

        GOAL: {goal}
        CONTEXT: {json.dumps(context, indent=2)}
        TASKS_PROCESSED: {len(self.tasks)}

        Provide the assessment as JSON with these exact keys:
        {{
            "progress_score": <number 0-100>,
            "assets": ["list of available resources"],
            "gaps": ["list of knowledge gaps"],
            "risks": ["list of potential risks"],
            "options": ["list of next steps"]
        }}
        """

        response = self.model.generate_content(assessment_prompt)
        try:
            # Strip a possible ```json fence before parsing.
            text = response.text.strip()
            if text.startswith('```'):
                text = text.split('```')[1]
                if text.startswith('json'):
                    text = text[4:]
            text = text.strip()
            return json.loads(text)
        except Exception as e:
            print(f"Assessment parsing error: {e}")
            return {
                "progress_score": 25,
                "assets": ["AI capabilities", "Internet knowledge"],
                "gaps": ["Specific domain expertise", "Real-time data"],
                "risks": ["Information accuracy", "Scope complexity"],
                "options": ["Break down into smaller tasks", "Focus on research first"]
            }
  
    def adaptive_plan(self, goal: str, assessment: Dict[str, Any]) -> List[Task]:
        """A: Adaptive Planning - Create dynamic, context-aware task decomposition"""
        planning_prompt = f"""
        You are an AI task planner. Respond ONLY with a valid JSON array, no extra text.

        MAIN_GOAL: {goal}
        ASSESSMENT: {json.dumps(assessment, indent=2)}

        Create 3-4 actionable tasks as a JSON array:
        [
            {{
                "id": "task_1",
                "description": "Clear, specific task description",
                "priority": 5,
                "dependencies": []
            }},
            {{
                "id": "task_2",
                "description": "Another specific task",
                "priority": 4,
                "dependencies": ["task_1"]
            }}
        ]

        Each task must have: id (string), description (string), priority (1-5), dependencies (array of strings)
        """

        response = self.model.generate_content(planning_prompt)
        try:
            # Strip a possible ```json fence before parsing.
            text = response.text.strip()
            if text.startswith('```'):
                text = text.split('```')[1]
                if text.startswith('json'):
                    text = text[4:]
            text = text.strip()

            task_data = json.loads(text)
            tasks = []
            for i, task_info in enumerate(task_data):
                task = Task(
                    id=task_info.get('id', f'task_{i+1}'),
                    description=task_info.get('description', 'Undefined task'),
                    priority=task_info.get('priority', 3),
                    dependencies=task_info.get('dependencies', [])
                )
                tasks.append(task)
            return tasks
        except Exception as e:
            print(f"Planning parsing error: {e}")
            return [
                Task(id="research_1", description="Research sustainable urban gardening basics", priority=5),
                Task(id="research_2", description="Identify space-efficient growing methods", priority=4),
                Task(id="compile_1", description="Organize findings into structured guide", priority=3, dependencies=["research_1", "research_2"])
            ]
  
    def execute_goal_oriented(self, task: Task) -> str:
        """G: Goal-oriented Execution - Execute a specific task with focused attention"""
        execution_prompt = f"""
        GOAL-ORIENTED EXECUTION:
        Task: {task.description}
        Priority: {task.priority}
        Context: {json.dumps(self.context, indent=2)}

        Execute this task step by step:
        1. Break the task down into concrete actions
        2. Execute each action methodically
        3. Validate results at each step
        4. Provide complete output

        Focus on practical, actionable results. Be specific and thorough.
        """

        response = self.model.generate_content(execution_prompt)
        return response.text.strip()
  
    def integrate_experience(self, task: Task, result: str, success: bool) -> Dict[str, Any]:
        """E: Experience Integration - Learn from results and update knowledge"""
        integration_prompt = f"""
        You are learning from task execution. Respond ONLY with valid JSON, no extra text.

        TASK: {task.description}
        RESULT: {result[:200]}...
        SUCCESS: {success}

        Provide learning insights as JSON:
        {{
            "learnings": ["key insight 1", "key insight 2"],
            "patterns": ["pattern observed 1", "pattern observed 2"],
            "adjustments": ["adjustment for future 1", "adjustment for future 2"],
            "confidence_boost": <number>
        }}
        """

        response = self.model.generate_content(integration_prompt)
        try:
            # Strip a possible ```json fence before parsing.
            text = response.text.strip()
            if text.startswith('```'):
                text = text.split('```')[1]
                if text.startswith('json'):
                    text = text[4:]
            text = text.strip()

            experience = json.loads(text)
            experience['task_id'] = task.id
            experience['timestamp'] = time.time()
            self.memory.append(experience)
            return experience
        except Exception as e:
            print(f"Experience parsing error: {e}")
            experience = {
                "learnings": [f"Completed task: {task.description}"],
                "patterns": ["Task execution follows planned approach"],
                "adjustments": ["Continue systematic approach"],
                "confidence_boost": 5 if success else -2,
                "task_id": task.id,
                "timestamp": time.time()
            }
            self.memory.append(experience)
            return experience
  
    def execute_sage_cycle(self, goal: str, max_iterations: int = 3) -> Dict[str, Any]:
        """Execute the complete SAGE cycle for goal achievement"""
        print(f"🎯 Starting SAGE cycle for goal: {goal}")
        results = {"goal": goal, "iterations": [], "final_status": "unknown"}

        for iteration in range(max_iterations):
            self.iteration_count += 1
            print(f"\n🔄 SAGE Iteration {iteration + 1}")

            print("📊 Self-Assessment...")
            assessment = self.self_assess(goal, self.context)
            print(f"Progress Score: {assessment.get('progress_score', 0)}/100")

            print("🗺️  Adaptive Planning...")
            tasks = self.adaptive_plan(goal, assessment)
            print(f"Generated {len(tasks)} tasks")

            print("⚡ Goal-oriented Execution...")
            iteration_results = []

            # Execute highest-priority tasks first, skipping those with unmet dependencies.
            for task in sorted(tasks, key=lambda x: x.priority, reverse=True):
                if self._dependencies_met(task):
                    print(f"  Executing: {task.description}")
                    task.status = TaskStatus.IN_PROGRESS

                    try:
                        result = self.execute_goal_oriented(task)
                        task.result = result
                        task.status = TaskStatus.COMPLETED
                        success = True
                        print(f"  ✅ Completed: {task.id}")
                    except Exception as e:
                        task.status = TaskStatus.FAILED
                        task.result = f"Error: {str(e)}"
                        success = False
                        print(f"  ❌ Failed: {task.id}")

                    experience = self.integrate_experience(task, task.result, success)

                    self.tasks[task.id] = task
                    iteration_results.append({
                        "task": asdict(task),
                        "experience": experience
                    })

            self._update_context(iteration_results)

            results["iterations"].append({
                "iteration": iteration + 1,
                "assessment": assessment,
                "tasks_generated": len(tasks),
                "tasks_completed": len([r for r in iteration_results if r["task"]["status"] == TaskStatus.COMPLETED]),
                "results": iteration_results
            })

            if assessment.get('progress_score', 0) >= 90:
                results["final_status"] = "achieved"
                print("🎉 Goal achieved!")
                break

        if results["final_status"] == "unknown":
            results["final_status"] = "in_progress"

        return results
  
    def _dependencies_met(self, task: Task) -> bool:
        """Check whether all of a task's dependencies have completed"""
        for dep_id in task.dependencies:
            if dep_id not in self.tasks or self.tasks[dep_id].status != TaskStatus.COMPLETED:
                return False
        return True

    def _update_context(self, results: List[Dict[str, Any]]):
        """Update agent context based on execution results"""
        completed_tasks = [r for r in results if r["task"]["status"] == TaskStatus.COMPLETED]
        self.context.update({
            "completed_tasks": len(completed_tasks),
            "total_tasks": len(self.tasks),
            "success_rate": len(completed_tasks) / len(results) if results else 0,
            "last_update": time.time()
        })
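To try the agent end to end, instantiate SAGEAgent with a Gemini API key and hand it a goal. The snippet below is a hypothetical usage sketch rather than part of the original listing; the API key placeholder and the example goal (which echoes the fallback tasks above) are illustrative only.

if __name__ == "__main__":
    # Placeholder: supply your own Gemini API key here.
    agent = SAGEAgent(api_key="YOUR_GEMINI_API_KEY")

    # Example goal; any concrete objective works.
    goal = "Create a practical guide to sustainable urban gardening"
    results = agent.execute_sage_cycle(goal, max_iterations=2)

    print(f"\nFinal status: {results['final_status']}")
    print(f"Tasks processed: {len(agent.tasks)}")
    print(f"Experiences stored: {len(agent.memory)}")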
