orx-kinect refactoring + new general orx-depth-camera (#257)

This commit is contained in:
Kazik Pogoda
2022-08-24 20:53:50 +02:00
committed by GitHub
parent c398aaa392
commit b7fc8918f4
39 changed files with 1792 additions and 725 deletions

View File

@@ -1,73 +0,0 @@
package org.openrndr.extra.kinect.v1.demo

import org.openrndr.application
import org.openrndr.draw.ColorBuffer
import org.openrndr.draw.ColorFormat
import org.openrndr.draw.colorBuffer
import org.openrndr.extra.kinect.*
import org.openrndr.extra.kinect.v1.getKinectsV1

/**
 * Shows 4 different representations of the depth map.
 * <ol>
 * <li>the original depth map stored as RED channel values</li>
 * <li>the same values expressed as gray tones</li>
 * <li>
 * color map according to natural light dispersion as described
 * by Alan Zucconi in the
 * <a href="https://www.alanzucconi.com/2017/07/15/improving-the-rainbow/">Improving the Rainbow</a>
 * article.
 * </li>
 * <li>
 * color map according to
 * <a href="https://ai.googleblog.com/2019/08/turbo-improved-rainbow-colormap-for.html">
 * Turbo, An Improved Rainbow Colormap for Visualization
 * </a>
 * by Google.
 * </li>
 * </ol>
 *
 * @see DepthToGrayscaleMapper
 * @see DepthToColorsZucconi6Mapper
 * @see DepthToColorsTurboMapper
 */
fun main() = application {
    configure {
        // 2 x 2 grid of kinect-sized (640x480) depth views
        width = 2 * 640
        height = 2 * 480
    }
    program {
        val kinects = getKinectsV1(this)
        // start the first (default) kinect device
        val kinect = kinects.startDevice()
        kinect.depthCamera.enabled = true
        kinect.depthCamera.mirror = true
        val camera = kinect.depthCamera
        // one mapper filter and one target buffer per color representation
        val grayscaleFilter = DepthToGrayscaleMapper()
        val zucconiFilter = DepthToColorsZucconi6Mapper()
        val turboFilter = DepthToColorsTurboMapper()
        val grayscaleBuffer = kinectColorBuffer(camera)
        val zucconiBuffer = kinectColorBuffer(camera)
        val turboBuffer = kinectColorBuffer(camera)
        extend {
            /*
             * Note: getting the latest frame this way will guarantee
             * that filters are being applied only if the actual new frame
             * from kinect was received. Kinect has different refresh rate
             * than usual screen (30 fps).
             */
            kinect.depthCamera.getLatestFrame()?.let { frame ->
                grayscaleFilter.apply(frame, grayscaleBuffer)
                zucconiFilter.apply(frame, zucconiBuffer)
                turboFilter.apply(frame, turboBuffer)
            }
            // 2x2 layout: raw | grayscale / turbo | zucconi
            drawer.image(camera.currentFrame)
            drawer.image(grayscaleBuffer, camera.width.toDouble(), 0.0)
            drawer.image(turboBuffer, 0.0, camera.height.toDouble())
            drawer.image(zucconiBuffer, camera.width.toDouble(), camera.height.toDouble())
        }
    }
}

/** Allocates an RGB color buffer matching the depth camera resolution. */
private fun kinectColorBuffer(camera: KinectCamera): ColorBuffer {
    return colorBuffer(camera.width, camera.height, format = ColorFormat.RGB)
}

View File

@@ -1,10 +1,10 @@
// NOTE(review): this span is a unified-diff hunk rendered WITHOUT +/- markers,
// so removed lines (old getKinectsV1 API) and added lines (new Kinect1
// extension API) are interleaved below — it is not compilable code as-is.
package org.openrndr.extra.kinect.v1.demo
import org.openrndr.application
import org.openrndr.extra.kinect.v1.getKinectsV1
import org.openrndr.extra.kinect.v1.Kinect1
/**
 * Basic kinect use case showing continuous stream from the depth camera.
 * Basic kinect1 use case showing continuous stream from the depth camera.
 *
 * Note: kinect depth map is stored only on the RED color channel to save
 * space. Therefore depth map is displayed only in the red tones.
@@ -15,12 +15,12 @@ fun main() = application {
height = 480
}
program {
val kinects = getKinectsV1(this)
val kinect = kinects.startDevice()
kinect.depthCamera.enabled = true
kinect.depthCamera.mirror = true
val kinect = extend(Kinect1())
val device = kinect.openDevice()
device.depthCamera.flipH = true // to make a mirror
device.depthCamera.enabled = true
extend {
drawer.image(kinect.depthCamera.currentFrame)
drawer.image(device.depthCamera.currentFrame)
}
}
}

View File

@@ -0,0 +1,111 @@
package org.openrndr.extra.kinect.v1.demo

import org.openrndr.application
import org.openrndr.draw.colorBuffer
import org.openrndr.extra.depth.camera.DepthMeasurement
import org.openrndr.extra.fx.colormap.TurboColormap
import org.openrndr.extra.gui.GUI
import org.openrndr.extra.kinect.v1.Kinect1
import org.openrndr.extra.parameters.BooleanParameter
import org.openrndr.extra.parameters.DoubleParameter

/**
 * A use case where "virtual walls" can be established within certain
 * depth ranges. Useful for actual installations, like interactive
 * projections in the form of a "mirror" for the human silhouette.
 * The measurement in meters helps in calibration.
 */
fun main() = application {
    configure { // default resolution of the Kinect v1 depth camera
        width = 640
        height = 480
    }
    program {
        val kinect = extend(Kinect1())
        val device = kinect.openDevice()
        val camera = device.depthCamera
        camera.flipH = true // to make a mirror
        // express depth values in meters, so the GUI sliders below calibrate
        // real-world distances
        camera.depthMeasurement = DepthMeasurement.METERS
        val turboColormap = TurboColormap().apply {
            minValue = .5
            maxValue = 5.0
            curve = 1.0
        }
        val outputBuffer = colorBuffer(
            camera.resolution.x,
            camera.resolution.y
        )
        /*
         * Note: the code specified in onFrameReceived will be executed as soon as
         * possible, also when GPU is idle.
         *
         * Also TurboColormap filter will be applied only after actual new frame
         * from kinect is received instead of being applied for each
         * program frame. Kinect has different refresh rate (30 fps) than usual
         * display.
         */
        camera.onFrameReceived { frame ->
            turboColormap.apply(frame, outputBuffer)
        }
        camera.enabled = true
        // GUI-facing adapter: exposes camera and colormap state as annotated
        // properties so the GUI edits the live objects directly
        @Suppress("unused")
        val settings = object {
            @BooleanParameter(label = "enabled", order = 0)
            var enabled: Boolean
                get() = camera.enabled
                set(value) {
                    camera.enabled = value
                }
            @BooleanParameter(label = "flipH", order = 1)
            var flipH: Boolean
                get() = camera.flipH
                set(value) {
                    camera.flipH = value
                }
            @BooleanParameter(label = "flipV", order = 2)
            var flipV: Boolean
                get() = camera.flipV
                set(value) {
                    camera.flipV = value
                }
            /*
             Note: we could use turboColormap parameters directly in the GUI, however the
             high range is capped to 1.0 there, and we want to use calibration in meters.
             Increase 5.0 to something higher if you are calibrating for a bigger space.
             */
            @DoubleParameter(label = "min distance", order = 3, low = 0.2, high = 5.0)
            var minDistance: Double
                get() = turboColormap.minValue
                set(value) {
                    turboColormap.minValue = value
                }
            @DoubleParameter(label = "max distance", order = 4, low = 0.2, high = 5.0)
            var maxDistance: Double
                get() = turboColormap.maxValue
                set(value) { turboColormap.maxValue = value }
            @DoubleParameter(label = "distance curve", order = 5, low = 0.01, high = 10.0)
            var curve: Double
                get() = turboColormap.curve
                set(value) {
                    turboColormap.curve = value
                }
        }
        extend(GUI()) {
            persistState = false
            compartmentsCollapsedByDefault = false
            add(settings, label = "depth camera")
        }
        extend {
            drawer.image(outputBuffer)
        }
    }
}

View File

@@ -0,0 +1,93 @@
package org.openrndr.extra.kinect.v1.demo

import org.openrndr.application
import org.openrndr.draw.ColorFormat
import org.openrndr.draw.colorBuffer
import org.openrndr.extra.fx.colormap.GrayscaleColormap
import org.openrndr.extra.fx.colormap.SpectralZucconiColormap
import org.openrndr.extra.fx.colormap.TurboColormap
import org.openrndr.extra.gui.GUI
import org.openrndr.extra.kinect.v1.Kinect1
import org.openrndr.extra.parameters.BooleanParameter
import org.openrndr.math.Vector2

/**
 * Shows 4 different color representations of the depth map:
 *
 * * the original depth map stored as RED channel values
 * * the same values expressed as gray tones
 * * zucconi6 color map according to natural light dispersion as described
 *   by Alan Zucconi in
 *   [Improving the Rainbow](https://www.alanzucconi.com/2017/07/15/improving-the-rainbow/)
 *   article
 * * turbo color map according to
 *   [Turbo, An Improved Rainbow Colormap for Visualization](https://ai.googleblog.com/2019/08/turbo-improved-rainbow-colormap-for.html)
 *   by Google.
 *
 * Note: the values are normalized in range 0-1, not in meters.
 * @see GrayscaleColormap
 * @see SpectralZucconiColormap
 * @see TurboColormap
 */
fun main() = application {
    // horizontal space reserved on the left for the GUI panel
    val guiOffset = 200
    configure {
        width = 2 * 640 + guiOffset
        height = 2 * 480
    }
    program {
        val kinect = extend(Kinect1())
        val device = kinect.openDevice()
        val camera = device.depthCamera
        // allocates an RGB buffer matching the depth camera resolution
        fun outputBuffer() = colorBuffer(
            camera.resolution.x,
            camera.resolution.y,
            format = ColorFormat.RGB
        )
        val grayscaleColormap = GrayscaleColormap()
        val spectralZucconiColormap = SpectralZucconiColormap()
        val turboColormap = TurboColormap()
        val grayscaleBuffer = outputBuffer()
        val zucconiBuffer = outputBuffer()
        val turboBuffer = outputBuffer()
        // GUI-facing adapter exposing live camera state as annotated properties
        @Suppress("unused")
        val settings = object {
            @BooleanParameter(label = "enabled", order = 0)
            var enabled: Boolean
                get() = camera.enabled
                set(value) { camera.enabled = value }
            @BooleanParameter(label = "flipH", order = 1)
            var flipH: Boolean
                get() = camera.flipH
                set(value) { camera.flipH = value }
            @BooleanParameter(label = "flipV", order = 2)
            var flipV: Boolean
                get() = camera.flipV
                set(value) { camera.flipV = value }
        }
        // colormaps are applied only when an actual new kinect frame arrives
        // (kinect refreshes at 30 fps, independently of the display)
        camera.onFrameReceived { frame ->
            grayscaleColormap.apply(frame, grayscaleBuffer)
            spectralZucconiColormap.apply(frame, zucconiBuffer)
            turboColormap.apply(frame, turboBuffer)
        }
        camera.enabled = true
        extend(GUI()) {
            persistState = false
            compartmentsCollapsedByDefault = false
            add(settings, label = "depth camera")
            add(grayscaleColormap)
            add(spectralZucconiColormap)
            add(turboColormap)
        }
        extend {
            // 2x2 layout right of the GUI: raw | grayscale / turbo | zucconi
            drawer.image(camera.currentFrame, guiOffset.toDouble(), 0.0)
            drawer.image(grayscaleBuffer, guiOffset + camera.resolution.x.toDouble(), 0.0)
            drawer.image(turboBuffer, guiOffset.toDouble(), camera.resolution.y.toDouble())
            drawer.image(zucconiBuffer, Vector2(guiOffset.toDouble(), 0.0) + camera.resolution.vector2)
        }
    }
}

View File

@@ -0,0 +1,31 @@
package org.openrndr.extra.kinect.v1.demo

import org.bytedeco.libfreenect.global.freenect
import org.openrndr.application
import org.openrndr.extra.kinect.v1.Kinect1

/**
 * Demonstrates running freenect commands directly, either globally or on a
 * particular device. Here the device LED is switched off completely, which
 * might be desirable for the aesthetics of an installation, while an LED
 * turned on might still be a useful indicator during development.
 */
fun main() = application {
    configure { // the default resolution of the Kinect v1 depth camera
        width = 640
        height = 480
    }
    program {
        val kinect1 = extend(Kinect1())
        val kinectDevice = kinect1.openDevice()
        // raw freenect call scoped to this device: disable the LED
        kinectDevice.executeInFreenectDeviceContext(
            "turn off led"
        ) { _, _, dev ->
            freenect.freenect_set_led(dev, freenect.LED_OFF)
        }
        val depthCamera = kinectDevice.depthCamera
        depthCamera.enabled = true
        extend {
            drawer.image(depthCamera.currentFrame)
        }
    }
}

View File

@@ -0,0 +1,32 @@
package org.openrndr.extra.kinect.v1.demo

import org.openrndr.application
import org.openrndr.extra.kinect.v1.Kinect1

/**
 * Render depth data from 2 kinect1 devices side-by-side.
 */
fun main() = application {
    configure {
        width = 640 * 2
        height = 480
    }
    program {
        val kinect = extend(Kinect1())
        /*
        on production system you might consider using stable kinect serial numbers,
        instead of index numbers, to avoid reordering of devices already installed
        in physical space.
        */
        val cameras = listOf(0, 1).map { index ->
            kinect.openDevice(index).depthCamera.apply {
                enabled = true
                flipH = true
            }
        }
        val (leftCamera, rightCamera) = cameras
        extend {
            // second stream is drawn to the right of the first one
            drawer.image(leftCamera.currentFrame)
            drawer.image(rightCamera.currentFrame, leftCamera.resolution.x.toDouble(), 0.0)
        }
    }
}

View File

@@ -0,0 +1,44 @@
package org.openrndr.extra.kinect.v1.demo

import org.bytedeco.libfreenect.global.freenect
import org.bytedeco.libfreenect.global.freenect.*
import org.openrndr.application
import org.openrndr.extra.kinect.v1.Kinect1

/**
 * Even though this library is abstracting freenect access, it is still
 * possible to call any low level kinect API through execute methods.
 * The calls are executed in separate kinect runner thread but they will
 * block the calling thread until the result is returned.
 */
fun main() = application {
    program {
        val kinect = extend(Kinect1())
        /*
        Blocking version will wait for the result, specifying the name
        makes it easier to identify this call in logs when it is finally
        executed on kinect. Note: enabling TRACE log level is required.
        */
        val numberOfKinectDevices = kinect.executeInFreenectContextBlocking(
            name = "numberOfKinectDevices"
        ) { ctx, _ ->
            freenect.freenect_num_devices(ctx)
        }
        println("numberOfKinectDevices: $numberOfKinectDevices")
        val device = kinect.openDevice()
        val maxTilt = 90.0
        // last tilt value actually sent to the motor
        var tilt = 0.0
        extend {
            device.executeInFreenectDeviceContext("disco LED") { _, _, dev ->
                freenect_set_led(dev, (seconds * 10).toInt() % 7) // disco
            }
            // alternate the motor between -maxTilt and +maxTilt every 5 seconds
            val newTilt = if ((seconds % 10) < 5) -maxTilt else maxTilt
            if (tilt != newTilt) {
                device.executeInFreenectDeviceContext("tilt change") { _, _, dev ->
                    // BUG FIX: previously the stale `tilt` value was passed here,
                    // so the motor was always set to the *previous* target
                    // (one toggle behind). Pass the freshly computed target.
                    freenect_set_tilt_degs(dev, newTilt)
                }
                tilt = newTilt
            }
        }
    }
}

View File

@@ -0,0 +1,30 @@
package org.openrndr.extra.kinect.v1.demo

import org.openrndr.application
import org.openrndr.extra.kinect.v1.Kinect1

/**
 * Here you can see the freenect FLOOD log level in action.
 *
 * Note: technically it would be possible to redirect the kinect log to an
 * slf4j logger in the implementation of [Kinect1], however that callback was
 * removed and logs stay on the standard out, because the output can get so
 * noisy that the native-to-JVM round trip, with conversion into [String] for
 * JVM logging, might completely kill the performance and result in a stack
 * overflow exception.
 */
fun main() = application {
    configure { // the default resolution of the Kinect v1 depth camera
        width = 640
        height = 480
    }
    program {
        val kinect1 = extend(Kinect1()).apply {
            logLevel = Kinect1.LogLevel.FLOOD
        }
        val depthCamera = kinect1.openDevice().depthCamera
        depthCamera.enabled = true
        extend {
            drawer.image(depthCamera.currentFrame)
        }
    }
}

View File

@@ -0,0 +1,49 @@
package org.openrndr.extra.kinect.v1.demo

import org.openrndr.application
import org.openrndr.draw.Filter
import org.openrndr.draw.colorBuffer
import org.openrndr.draw.filterShaderFromCode
import org.openrndr.extra.depth.camera.DepthMeasurement
import org.openrndr.extra.kinect.v1.Kinect1

/**
 * It is possible to rewrite raw kinect value interpretation completely
 * while keeping all the performance characteristics.
 *
 * Note: when depth measurement is set to RAW, the flip options does not apply.
 */
fun main() = application {
    configure { // default resolution of the Kinect v1 depth camera
        width = 640
        height = 480
    }
    program {
        val kinect = extend(Kinect1())
        val device = kinect.openDevice()
        val camera = device.depthCamera
        // RAW: frames carry unprocessed kinect depth readings; the shader below
        // reads them as unsigned integers and normalizes by 2047
        camera.depthMeasurement = DepthMeasurement.RAW
        val outputBuffer = colorBuffer(camera.resolution.x, camera.resolution.y)
        // custom filter interpreting the raw depth texture as grayscale
        val filter = Filter(filterShaderFromCode("""
layout(origin_upper_left) in vec4 gl_FragCoord;
uniform usampler2D tex0; // kinect raw
out vec4 o_color;
void main() {
ivec2 uv = ivec2(gl_FragCoord);
uint uintDepth = texelFetch(tex0, uv, 0).r;
float depth = float(uintDepth) / 2047.;
o_color = vec4(vec3(depth), 1.);
}
""".trimIndent(),
            "raw filter")
        )
        // apply the custom filter only when an actual new kinect frame arrives
        camera.onFrameReceived { frame ->
            filter.apply(frame, outputBuffer)
        }
        device.depthCamera.enabled = true
        extend {
            drawer.image(outputBuffer)
        }
    }
}

View File

@@ -0,0 +1,117 @@
package org.openrndr.extra.kinect.v1.demo

import org.openrndr.Fullscreen
import org.openrndr.application
import org.openrndr.draw.Filter
import org.openrndr.draw.colorBuffer
import org.openrndr.draw.filterShaderFromCode
import org.openrndr.extra.depth.camera.DepthMeasurement
import org.openrndr.extra.depth.camera.calibrator.DepthCameraCalibrator
import org.openrndr.extra.depth.camera.calibrator.isolatedWithCalibration
import org.openrndr.extra.gui.GUI
import org.openrndr.extra.kinect.v1.Kinect1

/**
 * How to use [DepthCameraCalibrator] with [Kinect1]?
 */
fun main() = application {
    configure {
        fullscreen = Fullscreen.CURRENT_DISPLAY_MODE
    }
    program {
        val kinect = extend(Kinect1())
        val device = kinect.openDevice()
        val camera = device.depthCamera
        // depth measurement in meters is required by the calibrator
        camera.depthMeasurement = DepthMeasurement.METERS
        val kinectResolution = camera.resolution
        val outputBuffer = colorBuffer(
            kinectResolution.x,
            kinectResolution.y
        )
        // simple visual effect applied to kinect data
        val spaceRangeExtractor = SpaceRangeExtractor()
        // run the effect only when an actual new kinect frame arrives
        camera.onFrameReceived { frame ->
            spaceRangeExtractor.apply(frame, outputBuffer)
        }
        val calibrator = DepthCameraCalibrator(this, camera)
        val gui = GUI()
        calibrator.addControlsTo(gui)
        /*
        Note: remember that extend(gui) has to be called after all the parameter
        controls are added.
        Also extensions are rendered in reverse order, if we start with gui,
        it will not be covered by calibrator view when calibrator is enabled
        */
        extend(gui)
        /*
        if it's an interactive installation, probably we don't want to
        show GUI on startup. It can be shown by pressing F11.
        */
        gui.visible = false
        /*
        Registering this callback here after gui will prevent it from
        being triggered multiple times when GUI parameters are restored
        on startup.
        */
        calibrator.onCalibrationChange { calibration ->
            spaceRangeExtractor.minDepth = calibration.minDepth
            spaceRangeExtractor.maxDepth = calibration.maxDepth
        }
        extend(calibrator)
        camera.enabled = true
        extend {
            // draw the effect within the calibrated screen region
            val calibration = calibrator.getCalibration(camera)
            drawer.isolatedWithCalibration(calibration) {
                image(
                    colorBuffer = outputBuffer,
                    position = calibration.position,
                    width = calibration.width,
                    height = calibration.height
                )
            }
        }
        // switching calibrator view on and off with keyboard (the "k" key)
        program.keyboard.keyDown.listen {
            if (it.name == "k") {
                calibrator.enabled = !calibrator.enabled
            }
        }
    }
}
/**
 * A visual effect applied to kinect data in this demonstration.
 * Everything is black, except for the white pixels within the range
 * of 2 virtual walls positioned at [minDepth] at front of the
 * viewer and [maxDepth] behind the viewer.
 */
class SpaceRangeExtractor : Filter(filterShaderFromCode("""
uniform sampler2D tex0; // kinect raw
uniform float minDepth;
uniform float maxDepth;
out vec4 o_color;
void main() {
ivec2 uv = ivec2(gl_FragCoord.xy);
float depth = texelFetch(tex0, uv, 0).r;
float luma = ((depth >= minDepth) && (depth <= maxDepth)) ? 1.0 : 0.0;
o_color = vec4(vec3(luma), 1.0);
}
""".trimIndent(),
"space range extractor"
)) {
    // near wall distance, forwarded to the shader as a uniform via `parameters`
    var minDepth: Double by parameters
    // far wall distance, forwarded to the shader as a uniform via `parameters`
    var maxDepth: Double by parameters
}

View File

@@ -1,33 +0,0 @@
package org.openrndr.extra.kinect.v1.demo

import org.openrndr.application
import org.openrndr.extra.kinect.v1.getKinectsV1

/**
 * Stream from 2 kinects side by side.
 *
 * Keys: "1"/"2" toggle each camera on and off, "q"/"w" toggle mirroring.
 */
fun main() = application {
    configure {
        width = 640 * 2
        height = 480
    }
    program {
        val kinects = getKinectsV1(this)
        val depthCamera1 = kinects.startDevice(0).depthCamera
        val depthCamera2 = kinects.startDevice(1).depthCamera
        depthCamera1.enabled = true
        depthCamera1.mirror = true
        depthCamera2.enabled = true
        depthCamera2.mirror = true
        extend {
            // second stream is drawn to the right of the first one
            drawer.image(depthCamera1.currentFrame)
            drawer.image(depthCamera2.currentFrame, depthCamera1.width.toDouble(), 0.0)
        }
        keyboard.keyDown.listen { keyEvent ->
            if (keyEvent.name == "1") {depthCamera1.enabled = !depthCamera1.enabled }
            if (keyEvent.name == "2") {depthCamera2.enabled = !depthCamera2.enabled }
            if (keyEvent.name == "q") {depthCamera1.mirror = !depthCamera1.mirror }
            if (keyEvent.name == "w") {depthCamera2.mirror = !depthCamera2.mirror }
        }
    }
}

View File

@@ -1,43 +0,0 @@
package org.openrndr.extra.kinect.v1.demo

import org.bytedeco.libfreenect.global.freenect
import org.bytedeco.libfreenect.global.freenect.*
import org.openrndr.application
import org.openrndr.extra.kinect.v1.getKinectsV1
import java.lang.RuntimeException

/**
 * Even though this library is abstracting freenect access, it is still
 * possible to call any low level kinect API through execute methods.
 * The calls are executed in separate kinect runner thread but they will
 * block the calling thread until the result is returned.
 */
fun main() = application {
    program {
        val kinects = getKinectsV1(this)
        // the same as calling kinects.countDevices(), here to show that any value might be returned from execute
        val num = kinects.execute { ctx -> freenect_num_devices(ctx.fnCtx) }
        if (num == 0) { throw RuntimeException("no kinect detected") }
        kinects.execute { ctx ->
            freenect_set_log_level(ctx.fnCtx, freenect.FREENECT_LOG_FLOOD) // lots of logs
        }
        kinects.execute { ctx ->
            // extra FREENECT_DEVICE_MOTOR gives control over tilt and LEDs
            // NOTE(review): `or` is the conventional operator for combining these
            // bit flags; `xor` happens to yield the same value only when the two
            // flags occupy distinct bits — confirm and consider using `or`.
            freenect_select_subdevices(ctx.fnCtx, FREENECT_DEVICE_CAMERA xor FREENECT_DEVICE_MOTOR)
        }
        val kinect = kinects.startDevice()
        // last tilt sent to the motor; starts at +90 so the first update
        // (currentTilt == -90 while seconds % 10 < 5) triggers a tilt change
        var tilt = 90.0
        extend {
            kinect.execute { ctx ->
                freenect_set_led(ctx.fnDev, (seconds * 10).toInt() % 7) // disco
            }
            // alternate the motor between -90 and +90 degrees every 5 seconds
            val currentTilt = if ((seconds % 10) < 5) -90.0 else 90.0
            if (currentTilt != tilt) {
                kinect.execute { ctx ->
                    freenect_set_tilt_degs(ctx.fnDev, currentTilt)
                }
                tilt = currentTilt
            }
        }
    }
}