Here's a detailed breakdown of implementing AI in a .NET MAUI application, covering four major approaches:
Option 1: Cloud-Based AI (Azure Cognitive Services)
Step 1: Set Up Azure Resources
Go to the Azure Portal
Create a Cognitive Services resource
Note your:
Endpoint URL (e.g., https://<your-region>.api.cognitive.microsoft.com)
API Key
Step 2: Add Required NuGet Packages
dotnet add package Microsoft.Azure.CognitiveServices.Vision.ComputerVision
Step 3: Implement Service Class
using Microsoft.Azure.CognitiveServices.Vision.ComputerVision;
using Microsoft.Azure.CognitiveServices.Vision.ComputerVision.Models;

public class AzureVisionService
{
    private readonly ComputerVisionClient _client;

    public AzureVisionService(string endpoint, string key)
    {
        _client = new ComputerVisionClient(new ApiKeyServiceClientCredentials(key))
        {
            Endpoint = endpoint
        };
    }

    public async Task<string> AnalyzeImage(Stream imageStream)
    {
        // Ask the service for a natural-language description plus tags
        var features = new List<VisualFeatureTypes?>
        {
            VisualFeatureTypes.Description,
            VisualFeatureTypes.Tags
        };

        var result = await _client.AnalyzeImageInStreamAsync(imageStream, features);
        return string.Join(", ", result.Description.Captions.Select(c => c.Text));
    }
}
Step 4: Register Service in MAUI
// In MauiProgram.cs
builder.Services.AddSingleton<AzureVisionService>(new AzureVisionService(
    endpoint: "YOUR_ENDPOINT",
    key: "YOUR_KEY"));
Step 5: Use in Page
private async void OnAnalyzeClicked(object sender, EventArgs e)
{
    var fileResult = await FilePicker.Default.PickAsync();
    if (fileResult == null)
        return;

    using var stream = await fileResult.OpenReadAsync();
    var result = await _azureVisionService.AnalyzeImage(stream);
    await DisplayAlert("Analysis Result", result, "OK");
}
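The handler above assumes the page holds an _azureVisionService field. A minimal sketch of obtaining it via constructor injection, assuming the page itself is resolved from the DI container (for example, registered with builder.Services.AddTransient<AnalyzePage>() and navigated via Shell); AnalyzePage is an illustrative name:

public partial class AnalyzePage : ContentPage
{
    private readonly AzureVisionService _azureVisionService;

    public AnalyzePage(AzureVisionService azureVisionService)
    {
        InitializeComponent();
        // Resolved because AzureVisionService was registered in MauiProgram.cs
        _azureVisionService = azureVisionService;
    }
}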
Option 2: Local AI with ONNX Runtime
Step 1: Get a Pretrained ONNX Model
Download a pretrained model (for example, from the ONNX Model Zoo)
Add it to the Resources/Raw folder in the MAUI project
Set Build Action = MauiAsset (see the .csproj sketch below)
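With the default .NET MAUI project template, this build action is already applied: the template's .csproj contains a wildcard MauiAsset item that bundles everything under Resources/Raw. A sketch of the relevant entry:

<ItemGroup>
    <!-- Default MAUI template entry: packages files in Resources/Raw as app assets -->
    <MauiAsset Include="Resources\Raw\**" LogicalName="%(RecursiveDir)%(Filename)%(Extension)" />
</ItemGroup>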
Step 2: Add NuGet Package
dotnet add package Microsoft.ML.OnnxRuntime
Step 3: Create Inference Service
using Microsoft.ML.OnnxRuntime;
using Microsoft.ML.OnnxRuntime.Tensors;

public class OnnxInferenceService
{
    private InferenceSession _session;

    public async Task LoadModel()
    {
        // For MAUI, load the model from the app package (Resources/Raw).
        // InferenceSession takes a file path or byte[], not a Stream,
        // so buffer the package stream into memory first.
        using var stream = await FileSystem.OpenAppPackageFileAsync("model.onnx");
        using var memory = new MemoryStream();
        await stream.CopyToAsync(memory);
        _session = new InferenceSession(memory.ToArray());
    }

    public float[] Predict(float[] inputData)
    {
        var inputTensor = new DenseTensor<float>(inputData, new[] { 1, inputData.Length });

        // "input_name" must match the model's actual input name;
        // check _session.InputMetadata.Keys if unsure.
        var inputs = new List<NamedOnnxValue>
        {
            NamedOnnxValue.CreateFromTensor("input_name", inputTensor)
        };

        using var results = _session.Run(inputs);
        return results.First().AsTensor<float>().ToArray();
    }
}
Step 4: Use in Application
// Initialize
var onnxService = new OnnxInferenceService();
await onnxService.LoadModel();

// Prediction
var input = new float[] { /* your data */ };
var result = onnxService.Predict(input);
Option 3: Custom Models with ML.NET
Step 1: Create Model Training Project
Create a separate .NET Console App
Install packages:
dotnet add package Microsoft.ML
Step 2: Train and Export Model
var mlContext = new MLContext();

// Load data
var data = mlContext.Data.LoadFromTextFile<ModelInput>("data.csv", separatorChar: ',');

// Build pipeline
var pipeline = mlContext.Transforms
    .Concatenate("Features", nameof(ModelInput.Feature1), nameof(ModelInput.Feature2))
    .Append(mlContext.Regression.Trainers.Sdca());

// Train
var model = pipeline.Fit(data);

// Save
mlContext.Model.Save(model, data.Schema, "model.zip");
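The pipeline references ModelInput and ModelOutput classes that are not shown above. A minimal sketch, where the column indices, feature names, and Label column are assumptions to adapt to your CSV layout:

using Microsoft.ML.Data;

public class ModelInput
{
    [LoadColumn(0)] public float Feature1 { get; set; }
    [LoadColumn(1)] public float Feature2 { get; set; }
    [LoadColumn(2)] public float Label { get; set; }   // regression target; Sdca() expects a "Label" column by default
}

public class ModelOutput
{
    [ColumnName("Score")] public float Score { get; set; }  // predicted value
}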
Step 3: Add Model to MAUI
Copy model.zip to Resources/Raw
Set Build Action = MauiAsset (the default template handles this automatically, as in Option 2)
Step 4: Implement Prediction Service
using Microsoft.ML;

public class MLModelService
{
    private PredictionEngine<ModelInput, ModelOutput> _engine;

    public async Task LoadModel()
    {
        var mlContext = new MLContext();

        // The app-package stream is not seekable on every platform, and
        // ML.NET needs a seekable stream to read the zipped model,
        // so buffer it into memory first.
        using var stream = await FileSystem.OpenAppPackageFileAsync("model.zip");
        using var memory = new MemoryStream();
        await stream.CopyToAsync(memory);
        memory.Position = 0;

        var model = mlContext.Model.Load(memory, out _);
        _engine = mlContext.Model.CreatePredictionEngine<ModelInput, ModelOutput>(model);
    }

    public ModelOutput Predict(ModelInput input) => _engine.Predict(input);
}
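A short usage sketch (the feature values are placeholders). Note that PredictionEngine is not thread-safe; create one per thread or synchronize access if predictions can run concurrently:

var mlService = new MLModelService();
await mlService.LoadModel();

var output = mlService.Predict(new ModelInput { Feature1 = 1.5f, Feature2 = 2.5f });
Console.WriteLine($"Predicted score: {output.Score}");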
Option 4: Python Integration (Advanced)
Step 1: Set Up Python Environment
Install Python 3.x
Install required packages:
pip install numpy tensorflow torch
Step 2: Add Python.NET to MAUI
dotnet add package pythonnet
Note: the NuGet package ID is pythonnet, not Python.NET. Embedding Python this way is realistic mainly for desktop targets (Windows, macOS); it is not supported on iOS or Android.
Step 3: Implement Python Wrapper
using Python.Runtime;

public class PythonMLService : IDisposable
{
    private dynamic _pythonModule;

    public PythonMLService()
    {
        PythonEngine.Initialize();
        using (Py.GIL())
        {
            // Make the script folder importable, then load the module
            dynamic sys = Py.Import("sys");
            sys.path.append("/path/to/your/python/scripts");
            _pythonModule = Py.Import("your_ml_module");
        }
    }

    public float Predict(float[] input)
    {
        using (Py.GIL())
        {
            var pyArray = new PyList(input.Select(x => new PyFloat(x)).ToArray());
            return (float)_pythonModule.predict(pyArray);
        }
    }

    public void Dispose() => PythonEngine.Shutdown();
}
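For reference, a hypothetical your_ml_module.py that satisfies the contract the wrapper assumes: a module-level predict function taking a list of floats and returning a number. The body is a stand-in, not a real model:

# your_ml_module.py (placeholder: swap in your real model logic)
def predict(values):
    # Toy example: return the mean of the inputs
    return sum(values) / len(values)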
Final Considerations
Performance Testing: Test on all target platforms (Android can be slower)
Error Handling: Add try-catch for network/GPU failures
Privacy: For sensitive data, prefer local models
Model Optimization: Quantize models for mobile deployment
Caching: Cache predictions when possible (a minimal sketch follows)
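As one possible approach, a minimal in-memory cache that memoizes predictions for repeated inputs; OnnxInferenceService is the service from Option 2, and any of the predictors above could be wrapped the same way:

using System.Collections.Concurrent;

public class CachedPredictor
{
    private readonly OnnxInferenceService _inner;
    private readonly ConcurrentDictionary<string, float[]> _cache = new();

    public CachedPredictor(OnnxInferenceService inner) => _inner = inner;

    public float[] Predict(float[] input)
    {
        // Join the input values into a stable cache key
        var key = string.Join(",", input);
        return _cache.GetOrAdd(key, _ => _inner.Predict(input));
    }
}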