The chat() method provides the simplest way to generate text responses. It returns only the generated text as a String, making it ideal for quick interactions.

Basic Usage

let response = try await RunAnywhere.chat("What is the capital of France?")
print(response)  // "The capital of France is Paris."

Method Signature

public static func chat(_ prompt: String) async throws -> String

Parameters

Parameter | Type   | Description
prompt    | String | The text prompt to respond to

Returns

A String containing the generated response text.

Throws

  • SDKError if the SDK is not initialized
  • SDKError if no LLM model is loaded
  • SDKError if generation fails
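
To avoid the first two errors, initialize the SDK and load a model before calling chat(). A minimal setup sketch is shown below; whether initialize() and loadModel() take configuration arguments depends on your project, so treat the bare calls as placeholders.

// Setup sketch -- your project may pass a configuration to initialize()
// and a model identifier to loadModel(); the bare calls here are placeholders.
try await RunAnywhere.initialize()
try await RunAnywhere.loadModel()

// chat() is only safe to call once both steps have succeeded.
let greeting = try await RunAnywhere.chat("Hello!")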

Examples

Simple Q&A

// Simple question
let answer = try await RunAnywhere.chat("What's 2 + 2?")
print(answer)  // "4" or "The answer is 4."

// More complex question
let explanation = try await RunAnywhere.chat(
    "Explain the difference between let and var in Swift"
)
print(explanation)

Conversation Flow

// While chat() doesn't maintain conversation history,
// you can build context manually
var context = ""

func ask(_ question: String) async throws -> String {
    let fullPrompt = context.isEmpty
        ? question
        : "\(context)\n\nUser: \(question)"

    let response = try await RunAnywhere.chat(fullPrompt)

    // Append to context for follow-up
    context += "\nUser: \(question)\nAssistant: \(response)"

    return response
}

let response1 = try await ask("My name is Alice")
let response2 = try await ask("What's my name?")  // Will remember "Alice"
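
Because the context string grows with every turn, a long conversation can eventually exceed the model's context window. A simple trimming helper keeps only the most recent lines; this is a hypothetical sketch, and the character budget below is an arbitrary placeholder you should tune to your model.

// Hypothetical helper: drop the oldest lines until the context fits a rough
// character budget. Tune maxCharacters to your model's context size.
func trimContext(_ context: String, maxCharacters: Int = 4_000) -> String {
    guard context.count > maxCharacters else { return context }
    var lines = context.split(separator: "\n", omittingEmptySubsequences: false)
    while !lines.isEmpty && lines.joined(separator: "\n").count > maxCharacters {
        lines.removeFirst()
    }
    return lines.joined(separator: "\n")
}

// For example, trim after appending each exchange:
// context = trimContext(context)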

Error Handling

do {
    let response = try await RunAnywhere.chat("Hello!")
    print(response)
} catch let error as SDKError {
    switch error.code {
    case .notInitialized:
        print("SDK not initialized. Call RunAnywhere.initialize() first.")
    case .modelNotFound:
        print("No model loaded. Call RunAnywhere.loadModel() first.")
    case .generationFailed:
        print("Generation failed: \(error.message)")
    default:
        print("Error: \(error.localizedDescription)")
    }
}
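
If you only need a best-effort reply and the specific error code doesn't matter, a small wrapper (a hypothetical helper, not part of the SDK) collapses the do/catch into a single optional check:

// Hypothetical convenience wrapper: returns nil instead of throwing.
func chatOrNil(_ prompt: String) async -> String? {
    do {
        return try await RunAnywhere.chat(prompt)
    } catch {
        print("chat() failed: \(error.localizedDescription)")
        return nil
    }
}

// Usage
if let reply = await chatOrNil("Hello!") {
    print(reply)
}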

When to Use chat() vs generate()

Use Case                  | Method
Quick responses           | chat()
Need performance metrics  | generate()
Custom generation options | generate()
Real-time token display   | generateStream()
Simple prototyping        | chat()
Production with analytics | generate()

SwiftUI Example

import SwiftUI

struct ChatView: View {
    @State private var input = ""
    @State private var messages: [(String, Bool)] = []  // (text, isUser)
    @State private var isLoading = false

    var body: some View {
        VStack {
            // Messages
            ScrollView {
                ForEach(Array(messages.enumerated()), id: \.offset) { _, message in
                    HStack {
                        if message.1 { Spacer() }
                        Text(message.0)
                            .padding()
                            .background(message.1 ? Color.blue : Color.gray.opacity(0.2))
                            .foregroundColor(message.1 ? .white : .primary)
                            .cornerRadius(12)
                        if !message.1 { Spacer() }
                    }
                }
            }

            // Input
            HStack {
                TextField("Message...", text: $input)
                    .textFieldStyle(.roundedBorder)

                Button(action: sendMessage) {
                    Image(systemName: "arrow.up.circle.fill")
                        .font(.title)
                }
                .disabled(input.isEmpty || isLoading)
            }
            .padding()
        }
    }

    func sendMessage() {
        let userMessage = input
        messages.append((userMessage, true))
        input = ""
        isLoading = true

        Task {
            do {
                let response = try await RunAnywhere.chat(userMessage)
                await MainActor.run {
                    messages.append((response, false))
                    isLoading = false
                }
            } catch {
                await MainActor.run {
                    messages.append(("Error: \(error.localizedDescription)", false))
                    isLoading = false
                }
            }
        }
    }
}

generate()

For detailed metrics and custom options, use generate() →