import com.runanywhere.sdk.public.RunAnywhere
import com.runanywhere.sdk.public.SDKEnvironment
import com.runanywhere.sdk.public.extensions.*
import com.runanywhere.sdk.core.types.InferenceFramework
class MyApplication : Application() {

    /**
     * Process entry point: brings the RunAnywhere SDK up as soon as the app
     * process is created. Per the SDK's own note, initialization is cheap
     * (~1-5 ms), so doing it synchronously in [onCreate] is fine.
     */
    override fun onCreate() {
        super.onCreate()
        // 1. Initialize SDK (fast, ~1-5ms); the API key is optional for development.
        RunAnywhere.initialize(
            environment = SDKEnvironment.DEVELOPMENT,
            apiKey = "your-api-key",
        )
    }
}
// 2. Use in your Activity/Fragment
class MainActivity : AppCompatActivity() {

    /** Id of the registered model; set once registration succeeds, null before that. */
    private var modelId: String? = null

    /**
     * Demonstrates the full model pipeline: register → download (with progress)
     * → load → generate. All SDK calls are suspend/Flow based, so they run in
     * a [lifecycleScope] coroutine tied to this activity's lifecycle.
     *
     * The whole pipeline touches the network and local storage, any step of
     * which can fail; failures are caught and logged instead of crashing the
     * activity (an uncaught exception in a lifecycleScope coroutine would
     * crash the app). Coroutine cancellation is rethrown, never swallowed.
     */
    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        lifecycleScope.launch {
            try {
                // Register and download a model
                val modelInfo = RunAnywhere.registerModel(
                    name = "Qwen 0.5B",
                    url = "https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct-GGUF/resolve/main/qwen2.5-0.5b-instruct-q8_0.gguf",
                    framework = InferenceFramework.LLAMA_CPP
                )
                modelId = modelInfo.id

                // Download with progress tracking; downloadModel returns a Flow
                // of progress updates (progress is a 0..1 fraction here).
                RunAnywhere.downloadModel(modelInfo.id).collect { progress ->
                    updateProgress((progress.progress * 100).toInt())
                }

                // Load the model into memory, then run a single generation.
                RunAnywhere.loadLLMModel(modelInfo.id)
                val result = RunAnywhere.generate(
                    prompt = "Explain quantum computing in simple terms",
                    options = LLMGenerationOptions(
                        maxTokens = 200,
                        temperature = 0.7f
                    )
                )
                showResponse(result.text)
                Log.d("LLM", "Generated in ${result.latencyMs}ms at ${result.tokensPerSecond} tok/s")
            } catch (e: CancellationException) {
                // Never swallow cancellation — structured concurrency depends on it.
                throw e
            } catch (e: Exception) {
                // Network/storage/inference failures land here; surface them in the log
                // rather than crashing the activity.
                Log.e("LLM", "Model pipeline failed", e)
            }
        }
    }
}