<?xml version="1.0" encoding="utf-8" ?>
<!-- Main page: shows the selected/captured photo, a result label, and two buttons
     wired to code-behind handlers for picking or taking a picture. -->
<ContentPage
x:Class="AIApp.MainPage"
xmlns="http://xamarin.com/schemas/2014/forms"
xmlns:x="http://schemas.microsoft.com/winfx/2009/xaml"
xmlns:ios="clr-namespace:Xamarin.Forms.PlatformConfiguration.iOSSpecific;assembly=Xamarin.Forms.Core"
xmlns:local="clr-namespace:AIApp"
Title="Safe Area"
ios:Page.UseSafeArea="True">
<StackLayout>
<!-- Preview of the photo being classified; filled from code-behind. -->
<Image
x:Name="picture"
Aspect="AspectFill"
VerticalOptions="FillAndExpand" />
<!-- Classification result text, set by the code-behind after detection. -->
<Label x:Name="output" HorizontalOptions="CenterAndExpand" />
<StackLayout Orientation="Horizontal">
<Button
Clicked="PickPhotoButton_Clicked"
HorizontalOptions="FillAndExpand"
Text="Pick a picture" />
<Button
Clicked="TakePhotoButton_Clicked"
HorizontalOptions="FillAndExpand"
Text="Take a picture" />
</StackLayout>
</StackLayout>
</ContentPage>
using Plugin.Media;
using Plugin.Media.Abstractions;
using System;
using System.Threading.Tasks;
using Xamarin.Forms;
namespace AIApp
{
    /// <summary>
    /// Code-behind for the main page: lets the user pick or take a photo,
    /// shows it, and displays the result of the platform photo detector.
    /// </summary>
    public partial class MainPage : ContentPage
    {
        public MainPage()
        {
            InitializeComponent();
        }

        // async void is acceptable here: these are top-level UI event handlers.
        private async void TakePhotoButton_Clicked(object sender, EventArgs e)
        {
            await ProcessPhotoAsync(true);
        }

        private async void PickPhotoButton_Clicked(object sender, EventArgs e)
        {
            await ProcessPhotoAsync(false);
        }

        /// <summary>
        /// Acquires a photo (camera or gallery), shows it, and runs detection on it.
        /// </summary>
        /// <param name="useCamera">true to capture with the camera; false to pick from the gallery.</param>
        private async Task ProcessPhotoAsync(bool useCamera)
        {
            await CrossMedia.Current.Initialize();

            // Bail out early when the requested acquisition mode is unavailable.
            var supported = useCamera
                ? CrossMedia.Current.IsTakePhotoSupported
                : CrossMedia.Current.IsPickPhotoSupported;
            if (!supported)
            {
                await DisplayAlert("Info", "Your phone doesn't support photo feature.", "OK");
                return;
            }

            MediaFile photo;
            if (useCamera)
            {
                photo = await CrossMedia.Current.TakePhotoAsync(new StoreCameraMediaOptions());
            }
            else
            {
                photo = await CrossMedia.Current.PickPhotoAsync();
            }

            // User cancelled: clear any previous preview and stop.
            if (photo == null)
            {
                picture.Source = null;
                return;
            }

            picture.Source = ImageSource.FromFile(photo.Path);

            // The detector is registered per-platform via DependencyService.
            var detector = DependencyService.Get<IPhotoDetector>();
            if (detector == null)
            {
                await DisplayAlert("Info", "Not implemented the feature on your device.", "OK");
                return;
            }

            using (var stream = photo.GetStream())
            {
                var tag = await detector.DetectAsync(stream);
                output.Text = $"It looks like a {tag}";
            }
        }
    }
}
using System.IO;
using System.Threading.Tasks;
namespace AIApp
{
    /// <summary>
    /// Platform-specific photo classifier; each platform project registers an
    /// implementation through the Xamarin.Forms DependencyService.
    /// </summary>
    public interface IPhotoDetector
    {
        /// <summary>
        /// Classifies the given photo.
        /// </summary>
        /// <param name="photo">Readable stream containing the encoded image data.</param>
        /// <returns>The predicted tag for the photo.</returns>
        Task<FriesOrNotFriesTag> DetectAsync(Stream photo);
    }
    /// <summary>
    /// Labels produced by the "fries or not fries" model.
    /// </summary>
    public enum FriesOrNotFriesTag
    {
        None,
        Fries,
        NotFries,
    }
}
using System;
using System.IO;
using System.Linq;
using System.Threading.Tasks;
using Windows.AI.MachineLearning;
using Windows.Graphics.Imaging;
using Windows.Media;
using Windows.Storage;
using Xamarin.Forms;
[assembly: Dependency(typeof(AIApp.UWP.PhotoDetector))]
namespace AIApp.UWP
{
    /// <summary>
    /// UWP implementation of <see cref="IPhotoDetector"/> using Windows ML
    /// (Windows.AI.MachineLearning) with the bundled FriesOrNotFries.onnx model.
    /// </summary>
    public class PhotoDetector : IPhotoDetector
    {
        // Loaded lazily on first detection and reused afterwards.
        private FriesOrNotFriesModel _model;

        /// <summary>
        /// Decodes the photo, evaluates the ONNX model, and maps the top label to a tag.
        /// </summary>
        /// <param name="photo">Readable stream containing the encoded image data.</param>
        /// <returns>The predicted <see cref="FriesOrNotFriesTag"/>.</returns>
        public async Task<FriesOrNotFriesTag> DetectAsync(Stream photo)
        {
            await InitializeModelAsync();
            var bitmapDecoder = await BitmapDecoder.CreateAsync(photo.AsRandomAccessStream());
            var output = await _model.EvaluateAsync(new FriesOrNotFriesInput
            {
                data = ImageFeatureValue.CreateFromVideoFrame(VideoFrame.CreateWithSoftwareBitmap(await bitmapDecoder.GetSoftwareBitmapAsync())),
            });
            // classLabel is a string vector; the first entry is the best-scoring label.
            var label = output.classLabel.GetAsVectorView().FirstOrDefault();
            // Fix: the original `return Enum.Parse(label);` had no target type and the
            // method was declared `Task` while returning a value — neither compiles.
            return (FriesOrNotFriesTag)Enum.Parse(typeof(FriesOrNotFriesTag), label);
        }

        /// <summary>Loads the ONNX model from the app package once.</summary>
        private async Task InitializeModelAsync()
        {
            if (_model != null)
            {
                return;
            }
            var onnx = await StorageFile.GetFileFromApplicationUriAsync(new Uri("ms-appx:///Assets/FriesOrNotFries.onnx"));
            _model = await FriesOrNotFriesModel.CreateFromStreamAsync(onnx);
        }
    }
}
using Android.Graphics;
using Org.Tensorflow.Contrib.Android;
using Plugin.CurrentActivity;
using System;
using System.IO;
using System.Linq;
using System.Threading.Tasks;
using Xamarin.Forms;
[assembly: Dependency(typeof(AIApp.Droid.PhotoDetector))]
namespace AIApp.Droid
{
    /// <summary>
    /// Android implementation of <see cref="IPhotoDetector"/> using the bundled
    /// TensorFlow model (model.pb) and its label list from the app assets.
    /// </summary>
    public class PhotoDetector : IPhotoDetector
    {
        private static readonly string ModelFile = "model.pb";
        private static readonly string LabelFile = "labels.txt";
        private static readonly string InputName = "Placeholder";
        private static readonly string OutputName = "loss";
        private static readonly int InputSize = 227; // model expects a 227x227 RGB input

        private readonly TensorFlowInferenceInterface _inferenceInterface;
        private readonly string[] _labels;

        public PhotoDetector()
        {
            _inferenceInterface = new TensorFlowInferenceInterface(CrossCurrentActivity.Current.Activity.Assets, ModelFile);
            // One label per line; blank lines are skipped.
            using (var sr = new StreamReader(CrossCurrentActivity.Current.Activity.Assets.Open(LabelFile)))
            {
                _labels = sr.ReadToEnd().Split('\n').Select(x => x.Trim()).Where(x => !string.IsNullOrEmpty(x)).ToArray();
            }
        }

        /// <summary>
        /// Decodes the photo, feeds it to the TensorFlow graph, and returns the
        /// tag corresponding to the highest-scoring output.
        /// </summary>
        /// <param name="photo">Readable stream containing the encoded image data.</param>
        /// <returns>The predicted <see cref="FriesOrNotFriesTag"/>.</returns>
        public async Task<FriesOrNotFriesTag> DetectAsync(Stream photo)
        {
            // Fix: the method was declared `Task` while returning a value, which does
            // not compile and does not implement IPhotoDetector.
            var bitmap = await BitmapFactory.DecodeStreamAsync(photo);
            var floatValues = GetBitmapPixels(bitmap);
            var outputs = new float[_labels.Length];
            _inferenceInterface.Feed(InputName, floatValues, 1, InputSize, InputSize, 3);
            _inferenceInterface.Run(new[] { OutputName });
            _inferenceInterface.Fetch(OutputName, outputs);
            var index = Array.IndexOf(outputs, outputs.Max());
            return (FriesOrNotFriesTag)Enum.Parse(typeof(FriesOrNotFriesTag), _labels[index]);
        }

        /// <summary>Reads an asset fully into a byte array.</summary>
        private async Task<byte[]> LoadByteArrayFromAssetsAsync(string name)
        {
            // Fix: declared `Task` while returning a byte[]; must be Task<byte[]>.
            using (var s = CrossCurrentActivity.Current.Activity.Assets.Open(name))
            using (var ms = new MemoryStream())
            {
                await s.CopyToAsync(ms);
                ms.Seek(0, SeekOrigin.Begin);
                return ms.ToArray();
            }
        }

        /// <summary>
        /// Scales the bitmap to the model input size and flattens its pixels into
        /// a float array in the channel layout the model expects.
        /// </summary>
        private static float[] GetBitmapPixels(Bitmap bitmap)
        {
            var floatValues = new float[InputSize * InputSize * 3];
            using (var scaledBitmap = Bitmap.CreateScaledBitmap(bitmap, InputSize, InputSize, false))
            {
                using (var resizedBitmap = scaledBitmap.Copy(Bitmap.Config.Argb8888, false))
                {
                    var intValues = new int[InputSize * InputSize];
                    resizedBitmap.GetPixels(intValues, 0, resizedBitmap.Width, 0, 0, resizedBitmap.Width, resizedBitmap.Height);
                    // NOTE(review): the original loop body was truncated in the source
                    // (`for (int i = 0; i > 8) & 0xFF) - 117);`). Reconstructed below using
                    // the surviving fragments (>>8 minus 117, >>16 minus 123) plus the
                    // conventional 104 mean for the low byte — confirm against the
                    // original project before shipping.
                    for (int i = 0; i < intValues.Length; i++)
                    {
                        var val = intValues[i];
                        floatValues[i * 3 + 0] = ((val & 0xFF) - 104);
                        floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - 117);
                        floatValues[i * 3 + 2] = (((val >> 16) & 0xFF) - 123);
                    }
                    resizedBitmap.Recycle();
                }
                scaledBitmap.Recycle();
            }
            return floatValues;
        }
    }
}
using CoreFoundation;
using CoreImage;
using CoreML;
using Foundation;
using System;
using System.IO;
using System.Linq;
using System.Threading.Tasks;
using Vision;
using Xamarin.Forms;
[assembly: Dependency(typeof(AIApp.iOS.PhotoDetector))]
namespace AIApp.iOS
{
    /// <summary>
    /// iOS implementation of <see cref="IPhotoDetector"/> using Core ML via the
    /// Vision framework with the bundled compiled model (FriesOrNotFries.mlmodelc).
    /// </summary>
    public class PhotoDetector : IPhotoDetector
    {
        private readonly MLModel _mlModel;
        private readonly VNCoreMLModel _model;

        public PhotoDetector()
        {
            var assetPath = NSBundle.MainBundle.GetUrlForResource("FriesOrNotFries", "mlmodelc");
            _mlModel = MLModel.Create(assetPath, out var _);
            _model = VNCoreMLModel.FromMLModel(_mlModel, out var __);
        }

        /// <summary>
        /// Runs a Vision classification request on the photo and maps the top
        /// classification label to a tag.
        /// </summary>
        /// <param name="photo">Readable stream containing the encoded image data.</param>
        /// <returns>The predicted tag, or <see cref="FriesOrNotFriesTag.None"/> when there are no observations.</returns>
        public Task<FriesOrNotFriesTag> DetectAsync(Stream photo)
        {
            // Fix: the method was declared `Task` and used a non-generic
            // TaskCompletionSource while completing with a value — use the generic
            // form so the result flows back to the caller. RunContinuationsAsynchronously
            // keeps awaiting code off the Vision callback thread.
            var taskCompletionSource = new TaskCompletionSource<FriesOrNotFriesTag>(TaskCreationOptions.RunContinuationsAsynchronously);
            void handleClassification(VNRequest request, NSError error)
            {
                // Fix: the untyped GetResults() yields VNObservation, which has no
                // Identifier; request the classification observations explicitly.
                var observations = request.GetResults<VNClassificationObservation>();
                if (observations == null)
                {
                    taskCompletionSource.SetException(new Exception("Unexpected result type from VNCoreMLRequest"));
                    return;
                }
                if (!observations.Any())
                {
                    taskCompletionSource.SetResult(FriesOrNotFriesTag.None);
                    return;
                }
                // Observations are ordered by confidence; the first is the best match.
                var best = observations.First();
                taskCompletionSource.SetResult((FriesOrNotFriesTag)Enum.Parse(typeof(FriesOrNotFriesTag), best.Identifier));
            }
            using (var data = NSData.FromStream(photo))
            {
                var ciImage = new CIImage(data);
                var handler = new VNImageRequestHandler(ciImage, new VNImageOptions());
                DispatchQueue.DefaultGlobalQueue.DispatchAsync(() =>
                {
                    handler.Perform(new VNRequest[] { new VNCoreMLRequest(_model, handleClassification) }, out var _);
                });
            }
            return taskCompletionSource.Task;
        }
    }
}
You must be a registered user to add a comment. If you've already registered, sign in. Otherwise, register and sign in.