10. Camera

SENG440 exam:

Camera API

Essentially an intent to the camera app which takes a photo and returns it.

Requires the camera permission in the app manifest.

// Get thumbnail
// Get thumbnail: fire an ACTION_IMAGE_CAPTURE intent at the system camera app.
Intent(MediaStore.ACTION_IMAGE_CAPTURE).also { takePictureIntent ->
  // Only proceed if some activity can actually handle the intent
  takePictureIntent.resolveActivity(packageManager)?.also {

    // OPTIONAL
    // Requesting the full-size image requires read/write access to external storage.
    // Nullable `File?`: creation can fail with IOException (the original `File` type
    // could not hold the `null` assigned in the catch branch), in which case we
    // fall back to the thumbnail-only flow.
    val photoFile: File? = try {
      // Probably use `getExternalFilesDir(Environment.DIRECTORY_PICTURES)`
      createImageFile()
    } catch (e: IOException) {
      null
    }
    photoFile?.also {
      // File provider: probably `androidx.core.content.FileProvider`
      // Must be declared in manifest in `<provider>`
      uri = FileProvider.getUriForFile(this, "com.example.android.fileprovider", it)
      takePictureIntent.putExtra(MediaStore.EXTRA_OUTPUT, uri)
    }
    // END OPTIONAL

    // Use the named constant so it matches the check in onActivityResult
    // (was a hard-coded `1`)
    startActivityForResult(takePictureIntent, REQUEST_IMAGE_CAPTURE)
  }
}

...

// Receives the result of the ACTION_IMAGE_CAPTURE intent.
// `data` is declared `Intent?`, so it must be accessed with safe calls
// (the original `data.extras.get(...)` would not compile / could NPE).
override fun onActivityResult(requestCode: Int, resultCode: Int, data: Intent?) {
  if (requestCode == REQUEST_IMAGE_CAPTURE && resultCode == RESULT_OK) {
    // Only returns a small thumbnail under the "data" extra — not the full image.
    // `as?` avoids a crash if the extra is missing or of an unexpected type.
    val thumbnailBitmap = data?.extras?.get("data") as? Bitmap
    thumbnailBitmap?.let { imageView.setImageBitmap(it) }
  }
}

Video is similar: ACTION_VIDEO_CAPTURE, but intent.data is a URI to the file, not a thumbnail.

CameraX

For direct control of the camera:

Two versions available: camera and camera2.

However, you should prefer to use the CameraX support library, which is backwards compatible with the Camera API and makes it easier to deal with device-specific features.

It supports the ImageAnalysis class which makes it easy to perform computer vision and machine learning.

// Call when view is created

val cameraProviderFuture = ProcessCameraProvider.getInstance(this)
// Fixed typo: the listener is added to `cameraProviderFuture`
// (original read `cameraFutureProvider`, an undefined name)
cameraProviderFuture.addListener(Runnable {
  // Camera lifecycle now bound to the lifecycle owner
  val cameraProvider: ProcessCameraProvider = cameraProviderFuture.get()

  // Surface which is passed the video stream
  val preview = Preview.Builder().build().also {
    it.setSurfaceProvider(viewFinder.createSurfaceProvider())
  }

  val cameraSelector = CameraSelector.DEFAULT_BACK_CAMERA
  try {
    // Unbind existing use cases - it can only be bound to one view at a time
    cameraProvider.unbindAll()

    // Can add an analyzer here
    cameraProvider.bindToLifecycle(this, cameraSelector, preview)
  } catch (exc: Exception) {
    Log.e(TAG, "Use case binding failed", exc)
  }
}, ContextCompat.getMainExecutor(this)) // Listener runs on the main (UI) thread

MLKit Integration

// Analysis use case: receives preview-sized frames (not full resolution)
// and hands each one to the analyzer callback.
val imageAnalysis = ImageAnalysis.Builder()
  .setTargetResolution(Size(1280, 720))
  // Drop stale frames if the analyzer can't keep up with the camera
  .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
  .build()
  .apply {
    // SAM-converted trailing lambda in place of the explicit Analyzer constructor
    setAnalyzer(executor) { image ->
      val rotationDegrees = image.imageInfo.rotationDegrees
      // ... analysis code
    }
  }

// Bind analysis alongside the preview to the same lifecycle owner
cameraProvider.bindToLifecycle(this as LifecycleOwner, cameraSelector, imageAnalysis, preview)

MLKit contains pre-built on-device models, e.g. face detection, text recognition, barcode scanning, image labelling, and object detection/tracking.

Lecture Demo

Face detection: keypoints view (drawn on a canvas) overlaid on top of full screen camera stream.

Using constraint layout with camera preview and dots view both matching parent size.

Camera:

override fun onViewCreated() {
  ...
  // Dedicated single background thread for the image-analysis pipeline
  cameraExecutor = Executors.newSingleThreadExecutor()

  // Update points transform when view size changes

  // `post` defers camera setup until the view finder has been laid out
  viewFinder.post { setUpCamera() }
}

// Obtain the process-wide camera provider, then bind use cases once it resolves.
fun setUpCamera() {
  val cameraProviderFuture = ProcessCameraProvider.getInstance(requireContext())
  // SAM-converted lambda (same Runnable under the hood); it is invoked on the
  // main executor once the future completes
  cameraProviderFuture.addListener(
    {
      cameraProvider = cameraProviderFuture.get()
      lensFacing = CameraSelector.LENS_FACING_FRONT
      bindCameraUseCases()
    },
    ContextCompat.getMainExecutor(requireContext())
  )
}

// Configure and bind preview, capture, and analysis use cases.
// Fixed OCR typos from the notes: WindowMetrics (was WindovMetrics),
// ::class.java (was ::class.jova), viewFinder (was vievFinder),
// imageAnalyzer (was imageAnalayzer), cameraProvider (was camearProvider).
fun bindCameraUseCases() {
  // Get screen metrics used to set up camera for full-screen resolution
  val metrics: WindowMetrics = viewFinder.context.getSystemService(WindowManager::class.java).currentWindowMetrics
  val screenAspectRatio = aspectRatio(metrics.bounds.width(), metrics.bounds.height())
  val rotation = viewFinder.display.rotation
  val cameraProvider = cameraProvider
    ?: throw IllegalStateException("Camera initialization failed.")
  val cameraSelector = CameraSelector.Builder().requireLensFacing(lensFacing).build()

  preview = Preview.Builder()
    // Set aspect ratio but auto resolution
    .setTargetAspectRatio(screenAspectRatio)
    // Set initial target rotation
    .setTargetRotation(rotation)
    .build()

  // So that the user can take a photo
  imageCapture = ImageCapture.Builder()
    .setCaptureMode(ImageCapture.CAPTURE_MODE_MINIMIZE_LATENCY)
    // Once again, set aspect ratio but allow CameraX to set resolution
    .setTargetAspectRatio(screenAspectRatio)
    // Set initial rotation - will need to call again if this changes
    .setTargetRotation(rotation)
    .build()

  // Name now matches the `imageAnalyzer` passed to bindToLifecycle below
  imageAnalyzer = ImageAnalysis.Builder()
    .setTargetAspectRatio(screenAspectRatio)
    .setTargetRotation(rotation)
    .build()
    .also {
      it.setAnalyzer(cameraExecutor, FaceAnalyzer({ pointsList ->
        facePointsView.points = pointsList
      }, { size ->
        // Update transform matrix
        // Aspect ratio, mirroring
        updateOverlayTransform(facePointsView, size)
      }))
    }

  // Unbind existing use cases before rebinding
  cameraProvider.unbindAll()

  try {
    preview?.setSurfaceProvider(viewFinder.surfaceProvider)
    camera = cameraProvider.bindToLifecycle(
      this,
      cameraSelector,
      preview,
      imageCapture,
      imageAnalyzer
    )
  } catch (e: Exception) {
    Log.e(TAG, "Failed to bind camera", e)
  }
}

// Runs ML Kit face detection on CameraX frames and reports the detected
// contour points. Frames arriving while a detection is still in flight are
// dropped (the analyzer may be too slow to process every frame).
class FaceAnalyzer(private val pointsListListener: PointsListListener,
                   private val sizeListener: SizeListener): ImageAnalysis.Analyzer {
  // Guards against overlapping detector runs
  private var isAnalyzing = AtomicBoolean(false)

  // Lazily create the detector with all face contours enabled
  // (fixed typos: `by lazy` was `by Lazy`; `.build()` was `•build()`)
  private val faceDetector: FaceDetector by lazy {
    val options = FaceDetectorOptions.Builder()
      .setContourMode(FaceDetectorOptions.CONTOUR_MODE_ALL)
      .build()
    FaceDetection.getClient(options)
  }

  // Flatten every contour of every detected face into one point list
  private val successListener = OnSuccessListener<List<Face>> { faces ->
    isAnalyzing.set(false)
    val points = mutableListOf<PointF>()
    for (face in faces) {
      val contours = face.getAllContours()
      for (contour in contours) {
        points += contour.points.map { PointF(it.x, it.y) }
      }
    }
    pointsListListener(points)
  }

  private val failureListener = OnFailureListener { e ->
    isAnalyzing.set(false)
  }

  override fun analyze(image: ImageProxy) {
    // `image` is non-null by its Kotlin type; only its backing `image.image`
    // (the android.media.Image) can be null.
    val mediaImage = image.image
    // compareAndSet closes the check-then-set race on the busy flag
    if (mediaImage != null && isAnalyzing.compareAndSet(false, true)) {
      val mlKitImage = InputImage.fromMediaImage(
        mediaImage, // fixed: original referenced the undefined name `cameraImage`
        image.imageInfo.rotationDegrees
      )

      faceDetector.process(mlKitImage)
        .addOnSuccessListener(successListener)
        .addOnFailureListener(failureListener)
        // Always release the frame, or CameraX stops delivering new ones
        .addOnCompleteListener { image.close() }
    } else {
      // Dropped frame (detector busy, or no backing image): it must still be
      // closed — the original leaked these frames, stalling the pipeline
      image.close()
    }
  }
}