val result = RunAnywhere.generate(
    prompt = "Write a haiku about Kotlin programming",
    options = LLMGenerationOptions(
        maxTokens = 50,
        temperature = 1.0f,
        topP = 0.9f,
        stopSequences = listOf("###")
    )
)
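// Inspect the generated text plus the generation metadata reported with it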
println("Response: ${result.text}")
println("Model: ${result.modelUsed}")
println("Tokens: ${result.tokensUsed}")
println("Speed: ${result.tokensPerSecond} tok/s")
println("Latency: ${result.latencyMs}ms")
// For reasoning models, thinkingContent carries the model's intermediate reasoning (null otherwise)
result.thinkingContent?.let { thinking ->
    println("Reasoning: $thinking")
}