AI responses are the core functionality of the Sudo platform: they let you generate chat completions from a wide range of models, including GPT, Claude, and Gemini.

Basic Usage

Simple AI Response

TypeScript
import { Sudo } from "sudo-ai";

const sudo = new Sudo({
  serverURL: "https://sudoapp.dev/api",
  apiKey: process.env.SUDO_API_KEY ?? "",
});

async function simpleResponse() {
  const response = await sudo.router.create({
    model: "claude-sonnet-4-20250514",
    messages: [
      { role: "user", content: "Hello! How are you?" }
    ]
  });
  
  console.log(response.choices[0].message.content);
}

simpleResponse();

Conversation with System Message

TypeScript
import { Sudo } from "sudo-ai";

const sudo = new Sudo({
  serverURL: "https://sudoapp.dev/api",
  apiKey: process.env.SUDO_API_KEY ?? "",
});

async function conversationWithSystem() {
  const response = await sudo.router.create({
    model: "gpt-4o",
    messages: [
      {
        role: "system", 
        content: "You are a helpful programming assistant specialized in TypeScript."
      },
      {
        role: "user", 
        content: "How do I create a type-safe object from two arrays?"
      }
    ]
  });
  
  console.log(response.choices[0].message.content);
}

conversationWithSystem();

Multi-turn Conversation

TypeScript
import { Sudo } from "sudo-ai";
import * as readline from 'readline';

const sudo = new Sudo({
  serverURL: "https://sudoapp.dev/api",
  apiKey: process.env.SUDO_API_KEY ?? "",
});

async function chatConversation() {
  // Conversation history
  const messages = [
    { role: "system", content: "You are a helpful assistant." }
  ];
  
  const rl = readline.createInterface({
    input: process.stdin,
    output: process.stdout
  });
  
  const askQuestion = (query: string): Promise<string> =>
    new Promise(resolve => rl.question(query, resolve));
  
  console.log("Chat started! Type 'quit' or 'exit' to end.");
  
  while (true) {
    try {
      // Get user input
      const userInput = await askQuestion("You: ");
      if (userInput.toLowerCase() === 'quit' || userInput.toLowerCase() === 'exit') {
        break;
      }
      
      // Add user message
      messages.push({ role: "user", content: userInput });
      
      // Get AI response
      const response = await sudo.router.create({
        model: "claude-sonnet-4-20250514",
        messages: messages
      });
      
      const aiMessage = response.choices[0].message.content;
      console.log(`AI: ${aiMessage}`);
      
      // Add AI response to conversation history
      messages.push({ role: "assistant", content: aiMessage });
      
    } catch (error) {
      console.error("Error:", error);
    }
  }
  
  rl.close();
}

chatConversation();

Parameters and Configuration

Temperature and Randomness

TypeScript
import { Sudo } from "sudo-ai";

const sudo = new Sudo({
  serverURL: "https://sudoapp.dev/api",
  apiKey: process.env.SUDO_API_KEY ?? "",
});

async function temperatureExamples() {
  // Deterministic output (low temperature)
  const deterministic = await sudo.router.create({
    model: "gpt-4o",
    messages: [{ role: "user", content: "Explain quantum computing" }],
    temperature: 0.1  // Very focused, consistent responses
  });
  
  // Creative output (high temperature)
  const creative = await sudo.router.create({
    model: "gpt-4o",
    messages: [{ role: "user", content: "Write a creative story" }],
    temperature: 0.9  // More random and creative
  });
  
  console.log("Deterministic:", deterministic.choices[0].message.content.substring(0, 100));
  console.log("Creative:", creative.choices[0].message.content.substring(0, 100));
}

temperatureExamples();

Token Limits

TypeScript
import { Sudo } from "sudo-ai";

const sudo = new Sudo({
  serverURL: "https://sudoapp.dev/api",
  apiKey: process.env.SUDO_API_KEY ?? "",
});

async function tokenLimitsExample() {
  const response = await sudo.router.create({
    model: "gpt-4o",
    messages: [{ role: "user", content: "Write a summary of machine learning" }],
    maxCompletionTokens: 150  // Limit response length
  });
  
  console.log(`Tokens used: ${response.usage.totalTokens}`);
  console.log(`Response: ${response.choices[0].message.content}`);
}

tokenLimitsExample();
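
If you need a finer breakdown than the total, the usage object also reports prompt and completion counts. A minimal sketch, assuming the SDK exposes them as `promptTokens` and `completionTokens` (field names inferred from the same camelCase convention as `totalTokens`):

TypeScript
import { Sudo } from "sudo-ai";

const sudo = new Sudo({
  serverURL: "https://sudoapp.dev/api",
  apiKey: process.env.SUDO_API_KEY ?? "",
});

async function usageBreakdownExample() {
  const response = await sudo.router.create({
    model: "gpt-4o",
    messages: [{ role: "user", content: "Summarize the benefits of unit testing" }],
    maxCompletionTokens: 150
  });

  // Field names assumed from the SDK's camelCase convention (e.g. totalTokens)
  console.log(`Prompt tokens: ${response.usage.promptTokens}`);
  console.log(`Completion tokens: ${response.usage.completionTokens}`);
  console.log(`Total tokens: ${response.usage.totalTokens}`);
}

usageBreakdownExample();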

Frequency and Presence Penalties

TypeScript
import { Sudo } from "sudo-ai";

const sudo = new Sudo({
  serverURL: "https://sudoapp.dev/api",
  apiKey: process.env.SUDO_API_KEY ?? "",
});

async function penaltiesExample() {
  const response = await sudo.router.create({
    model: "gpt-4o",
    messages: [{ role: "user", content: "List programming languages" }],
    frequencyPenalty: 0.5,  // Reduce repetition of frequent words
    presencePenalty: 0.5    // Encourage diverse vocabulary
  });
  
  console.log(response.choices[0].message.content);
}

penaltiesExample();

Stop Sequences

TypeScript
import { Sudo } from "sudo-ai";

const sudo = new Sudo({
  serverURL: "https://sudoapp.dev/api",
  apiKey: process.env.SUDO_API_KEY ?? "",
});

async function stopSequencesExample() {
  const response = await sudo.router.create({
    model: "gpt-4o",
    messages: [{ role: "user", content: "Count from 1 to 10" }],
    stop: ["5", "---"]  // Stop generation at these sequences
  });
  
  console.log(response.choices[0].message.content);
}

stopSequencesExample();

Response Format Options

Multiple Choices

TypeScript
import { Sudo } from "sudo-ai";

const sudo = new Sudo({
  serverURL: "https://sudoapp.dev/api",
  apiKey: process.env.SUDO_API_KEY ?? "",
});

async function multipleChoicesExample() {
  const response = await sudo.router.create({
    model: "gpt-4o",
    messages: [{ role: "user", content: "Give me a creative business idea" }],
    n: 3  // Generate 3 different responses
  });
  
  response.choices.forEach((choice, index) => {
    console.log(`Idea ${index + 1}: ${choice.message.content}`);
    console.log("-".repeat(50));
  });
}

multipleChoicesExample();

Log Probabilities

TypeScript
import { Sudo } from "sudo-ai";

const sudo = new Sudo({
  serverURL: "https://sudoapp.dev/api",
  apiKey: process.env.SUDO_API_KEY ?? "",
});

async function logProbsExample() {
  const response = await sudo.router.create({
    model: "gpt-4o",
    messages: [{ role: "user", content: "The capital of France is" }],
    logprobs: true,
    topLogprobs: 3  // Show top 3 token probabilities
  });
  
  const choice = response.choices[0];
  if (choice.logprobs) {
    choice.logprobs.content.forEach(tokenInfo => {
      console.log(`Token: ${tokenInfo.token}`);
      console.log(`Log probability: ${tokenInfo.logprob}`);
      console.log("Top alternatives:");
      tokenInfo.topLogprobs.forEach(alt => {
        console.log(`  ${alt.token}: ${alt.logprob}`);
      });
    });
  }
}

logProbsExample();
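
Log probabilities are natural logarithms of the token probabilities, so exponentiating recovers the probability itself. A small helper (plain JavaScript math, no additional SDK assumptions):

TypeScript
// Convert a natural-log probability (as returned in logprobs) to a plain probability
function logprobToProbability(logprob: number): number {
  return Math.exp(logprob);
}

// Example: a logprob of -0.105 corresponds to roughly a 90% probability
console.log(logprobToProbability(-0.105).toFixed(3)); // ~0.900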

Metadata and Storage

Adding Metadata

TypeScript
import { Sudo } from "sudo-ai";

const sudo = new Sudo({
  serverURL: "https://sudoapp.dev/api",
  apiKey: process.env.SUDO_API_KEY ?? "",
});

async function metadataExample() {
  const response = await sudo.router.create({
    model: "gpt-4o",
    messages: [{ role: "user", content: "Explain TypeScript decorators" }],
    metadata: {
      userId: "user_123",
      sessionId: "session_456",
      feature: "code_explanation",
      version: "1.0"
    },
    store: true  // Store for later retrieval (OpenAI compatible)
  });
  
  console.log(`Completion ID: ${response.id}`);
  console.log(`Response: ${response.choices[0].message.content}`);
}

metadataExample();

Async Usage

Basic Async

TypeScript
import { Sudo } from "sudo-ai";

const sudo = new Sudo({
  serverURL: "https://sudoapp.dev/api",
  apiKey: process.env.SUDO_API_KEY ?? "",
});

async function asyncChat() {
  try {
    const response = await sudo.router.create({
      model: "claude-sonnet-4-20250514",
      messages: [{ role: "user", content: "Hello async world!" }]
    });
    
    return response.choices[0].message.content;
  } catch (error) {
    console.error("Error in async chat:", error);
    throw error;
  }
}

// Usage
asyncChat()
  .then(result => console.log(result))
  .catch(error => console.error("Failed:", error));

Concurrent Requests

TypeScript
import { Sudo } from "sudo-ai";

const sudo = new Sudo({
  serverURL: "https://sudoapp.dev/api",
  apiKey: process.env.SUDO_API_KEY ?? "",
});

async function processMultipleQueries() {
  const queries = [
    "What is machine learning?",
    "Explain neural networks",
    "What is deep learning?",
    "How do transformers work?"
  ];
  
  try {
    // Process all queries concurrently
    const promises = queries.map(query =>
      sudo.router.create({
        model: "gpt-4o",
        messages: [{ role: "user", content: query }],
        maxCompletionTokens: 100
      })
    );
    
    const responses = await Promise.all(promises);
    
    // Display results
    responses.forEach((response, index) => {
      console.log(`Q: ${queries[index]}`);
      console.log(`A: ${response.choices[0].message.content}`);
      console.log(`Tokens: ${response.usage.totalTokens}`);
      console.log("---");
    });
    
  } catch (error) {
    console.error("Error processing queries:", error);
  }
}

processMultipleQueries();
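
Note that Promise.all rejects as soon as any single request fails, discarding the successful ones. If you want partial results instead, Promise.allSettled (standard JavaScript) keeps whatever succeeded; a minimal sketch reusing the same request shape as above:

TypeScript
import { Sudo } from "sudo-ai";

const sudo = new Sudo({
  serverURL: "https://sudoapp.dev/api",
  apiKey: process.env.SUDO_API_KEY ?? "",
});

async function processQueriesWithPartialFailures() {
  const queries = ["What is machine learning?", "Explain neural networks"];

  // allSettled never rejects; each result is either fulfilled or rejected
  const results = await Promise.allSettled(
    queries.map(query =>
      sudo.router.create({
        model: "gpt-4o",
        messages: [{ role: "user", content: query }],
        maxCompletionTokens: 100
      })
    )
  );

  results.forEach((result, index) => {
    if (result.status === "fulfilled") {
      console.log(`Q: ${queries[index]}`);
      console.log(`A: ${result.value.choices[0].message.content}`);
    } else {
      console.error(`Query failed: ${queries[index]}`, result.reason);
    }
  });
}

processQueriesWithPartialFailures();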

Error Handling

TypeScript
import { Sudo } from "sudo-ai";
import * as errors from "sudo-ai/models/errors";

const sudo = new Sudo({
  serverURL: "https://sudoapp.dev/api",
  apiKey: process.env.SUDO_API_KEY ?? "",
});

async function errorHandlingExample() {
  try {
    const response = await sudo.router.create({
      model: "non-existent-model",  // This will cause an error
      messages: [{ role: "user", content: "Hello" }]
    });
    
    console.log(response.choices[0].message.content);
    
  } catch (error) {
    if (error instanceof errors.SudoError) {
      console.log(`SDK Error: ${error.message}`);
      console.log(`Status Code: ${error.statusCode}`);
      console.log(`Response Body: ${error.body}`);
      
      // Handle specific error types
      if (error instanceof errors.ErrorResponse) {
        console.log(`Error Details: ${error.data$.error}`);
      }
    } else {
      console.log(`Unexpected error: ${error}`);
    }
  }
}

errorHandlingExample();
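
For transient failures (rate limits, temporary server errors), a simple retry with exponential backoff is often enough. A minimal sketch, assuming that a statusCode of 429 or 5xx on a SudoError indicates a retryable condition:

TypeScript
import { Sudo } from "sudo-ai";
import * as errors from "sudo-ai/models/errors";

const sudo = new Sudo({
  serverURL: "https://sudoapp.dev/api",
  apiKey: process.env.SUDO_API_KEY ?? "",
});

const sleep = (ms: number) => new Promise(resolve => setTimeout(resolve, ms));

async function createWithRetry(maxAttempts: number = 3) {
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    try {
      return await sudo.router.create({
        model: "gpt-4o",
        messages: [{ role: "user", content: "Hello" }]
      });
    } catch (error) {
      // Assumption: 429 / 5xx status codes are worth retrying; anything else is not
      if (
        error instanceof errors.SudoError &&
        (error.statusCode === 429 || error.statusCode >= 500) &&
        attempt < maxAttempts
      ) {
        const delayMs = 1000 * 2 ** (attempt - 1);  // 1s, 2s, 4s, ...
        console.warn(`Attempt ${attempt} failed (status ${error.statusCode}); retrying in ${delayMs}ms`);
        await sleep(delayMs);
        continue;
      }
      throw error;
    }
  }
  throw new Error("unreachable");
}

createWithRetry().then(response => console.log(response.choices[0].message.content));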

Advanced Examples

Chat Bot with Context Management

TypeScript
import { Sudo } from "sudo-ai";
import * as readline from 'readline';

interface ChatResponse {
  response?: string;
  tokensUsed?: number;
  model?: string;
  error?: string;
}

class ChatBot {
  private sudo: Sudo;
  private model: string;
  private maxHistory: number;
  private conversationHistory: Array<{role: string; content: string}>;

  constructor(model: string = "claude-sonnet-4-20250514", maxHistory: number = 10) {
    this.sudo = new Sudo({
      serverURL: "https://sudoapp.dev/api",
      apiKey: process.env.SUDO_API_KEY ?? "",
    });
    this.model = model;
    this.maxHistory = maxHistory;
    this.conversationHistory = [
      {
        role: "system",
        content: `You are a helpful assistant. Current time: ${new Date().toISOString()}`
      }
    ];
  }

  async chat(message: string): Promise<ChatResponse> {
    try {
      // Add user message
      this.conversationHistory.push({ role: "user", content: message });

      // Trim history if too long
      if (this.conversationHistory.length > this.maxHistory) {
        // Keep system message and recent messages
        this.conversationHistory = [
          this.conversationHistory[0]  // System message
        ].concat(this.conversationHistory.slice(-(this.maxHistory - 1)));
      }

      const response = await this.sudo.router.create({
        model: this.model,
        messages: this.conversationHistory,
        temperature: 0.7
      });

      const aiResponse = response.choices[0].message.content;

      // Add AI response to history
      this.conversationHistory.push({ role: "assistant", content: aiResponse });

      return {
        response: aiResponse,
        tokensUsed: response.usage.totalTokens,
        model: response.model
      };

    } catch (error) {
      return { error: `Failed to get response: ${error}` };
    }
  }

  resetConversation(): void {
    // Reset the conversation history, keeping only the system message
    this.conversationHistory = [this.conversationHistory[0]];
  }

  getHistorySummary(): { totalMessages: number; userMessages: number; aiMessages: number } {
    const userMessages = this.conversationHistory.filter(m => m.role === "user").length;
    const aiMessages = this.conversationHistory.filter(m => m.role === "assistant").length;
    return {
      totalMessages: this.conversationHistory.length,
      userMessages,
      aiMessages
    };
  }
}

// Usage
async function runChatBot() {
  const bot = new ChatBot();
  
  const rl = readline.createInterface({
    input: process.stdin,
    output: process.stdout
  });

  const askQuestion = (query: string): Promise<string> =>
    new Promise(resolve => rl.question(query, resolve));

  console.log("Chat started! Type 'quit', 'exit', 'reset', or 'summary'");

  while (true) {
    try {
      const userInput = await askQuestion("You: ");
      
      if (userInput.toLowerCase() === 'quit' || userInput.toLowerCase() === 'exit') {
        break;
      } else if (userInput.toLowerCase() === 'reset') {
        bot.resetConversation();
        console.log("Conversation reset!");
        continue;
      } else if (userInput.toLowerCase() === 'summary') {
        const summary = bot.getHistorySummary();
        console.log(`Conversation summary: ${JSON.stringify(summary)}`);
        continue;
      }

      const result = await bot.chat(userInput);
      if (result.error) {
        console.log(`Error: ${result.error}`);
      } else {
        console.log(`AI: ${result.response}`);
        console.log(`(Tokens: ${result.tokensUsed}, Model: ${result.model})`);
      }
    } catch (error) {
      console.error("Unexpected error:", error);
    }
  }

  rl.close();
}

runChatBot();

Next Steps

Now that you understand AI responses:
  1. Explore Streaming - Add real-time streaming responses
  2. Try Tool Calling - Enable function calling capabilities
  3. Structured Output - Generate JSON and structured data
  4. Image Input - Work with multimodal inputs
Start with simple use cases and gradually add complexity. Use a model appropriate to the task: faster models for simple requests, more capable models for complex reasoning (see the sketch below). Both the Python and TypeScript SDKs offer the same capabilities with language-specific optimizations.
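
As one illustration of matching the model to the task, here is a hypothetical routing helper. The classification rule is a placeholder, and the model IDs are simply the ones used in the examples above, not recommendations:

TypeScript
import { Sudo } from "sudo-ai";

const sudo = new Sudo({
  serverURL: "https://sudoapp.dev/api",
  apiKey: process.env.SUDO_API_KEY ?? "",
});

// Hypothetical routing rule: treat short prompts as "simple" and longer ones as "complex".
// The model IDs below are just the examples used elsewhere on this page.
function pickModel(prompt: string): string {
  return prompt.length < 200 ? "gpt-4o" : "claude-sonnet-4-20250514";
}

async function routedResponse(prompt: string) {
  const response = await sudo.router.create({
    model: pickModel(prompt),
    messages: [{ role: "user", content: prompt }]
  });
  return response.choices[0].message.content;
}

routedResponse("Give me a one-line definition of recursion.")
  .then(answer => console.log(answer));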