orx-kinect refactoring + new general orx-depth-camera (#257)

This commit is contained in:
Kazik Pogoda
2022-08-24 20:53:50 +02:00
committed by GitHub
parent c398aaa392
commit b7fc8918f4
39 changed files with 1792 additions and 725 deletions

4
.gitignore vendored
View File

@@ -6,4 +6,6 @@ build/
*.iml/
.idea/
gradle.properties
/ShaderError.txt
/hs_err_pid*.log
/gui-parameters/
/ShaderError.glsl

View File

@@ -37,6 +37,7 @@ A growing library of assorted data structures, algorithms and utilities.
| [`orx-temporal-blur`](orx-temporal-blur/) | Post-processing temporal-blur video effect. CPU intense, therefore not intended for use with the `ScreenRecorder` extension or other real-time uses. |
| [`orx-time-operators`](orx-time-operators/) | A collection of time-sensitive functions aimed at controlling raw data over-time, such as Envelope and LFO. |
| [`orx-timer`](orx-timer/) | Simple timer functionality providing `repeat`, to run code with a given interval and `timeOut`, to run code once after a given delay. |
| [`orx-depth-camera`](orx-depth-camera/) | Common API for various depth cameras like Kinect 1 and 2. |
## JVM only

View File

@@ -35,8 +35,8 @@ def multiplatformModules = [
"orx-shader-phrases",
"orx-shapes",
"orx-quadtree",
"orx-hash-grid"
"orx-hash-grid",
"orx-depth-camera"
]
def doNotPublish = ["openrndr-demos"]

View File

@@ -0,0 +1,5 @@
// keeping this file here will stop IntelliJ from showing warning in nested relative packages
/**
* orx-color
*/
package org.openrndr.extra.color

View File

@@ -0,0 +1,26 @@
// Kotlin multiplatform build configuration: JVM and JS (IR) targets.
plugins {
    kotlin("multiplatform")
}

kotlin {
    jvm {
        // run JVM tests with JUnit 5
        testRuns["test"].executionTask.configure {
            useJUnitPlatform()
        }
    }
    js(IR) {
        browser()
        nodejs()
    }
    sourceSets {
        @Suppress("UNUSED_VARIABLE")
        val commonMain by getting {
            dependencies {
                // only core OPENRNDR APIs are needed by the common depth camera API
                implementation(libs.openrndr.application)
                implementation(libs.openrndr.math)
            }
        }
    }
}

View File

@@ -0,0 +1,72 @@
package org.openrndr.extra.depth.camera
import org.openrndr.draw.ColorBuffer
import org.openrndr.math.IntVector2
/**
 * Defines how pixel values encoded in depth [ColorBuffer] will be interpreted.
 */
enum class DepthMeasurement {

    /**
     * Raw values, but normalized to the range 0-1.
     * Useful for debugging, because the full range of captured values can be
     * rendered as a texture. Therefore it is the default setting.
     */
    RAW_NORMALIZED,

    /**
     * Raw values, exactly as they are provided by the device.
     * Note: it might imply that the [ColorBuffer] of the depth camera frame
     * is provided in integer-based format (for example in case of Kinect devices).
     */
    RAW,

    /**
     * Expressed in meters.
     * It is using floating point numbers.
     * Note: values above `1.0` will not be visible if displayed as a texture.
     */
    METERS,
}
/**
 * General API of any depth camera.
 */
interface DepthCamera {

    /**
     * Current operating resolution.
     */
    val resolution: IntVector2

    /**
     * The units/mapping in which depth is expressed on received frames.
     * See [DepthMeasurement] for the possible interpretations.
     */
    var depthMeasurement: DepthMeasurement

    /**
     * Flips source depth data image in horizontal axis (mirror).
     */
    var flipH: Boolean

    /**
     * Flips source depth data image in vertical axis (upside-down).
     */
    var flipV: Boolean

    /**
     * The most recent frame received from the depth camera.
     */
    val currentFrame: ColorBuffer

    /**
     * Will execute the supplied block of code with each most recent frame
     * from the depth camera as an input.
     *
     * @param block the code to execute when the new frame is received.
     */
    fun onFrameReceived(block: (frame: ColorBuffer) -> Unit)
}

View File

@@ -167,6 +167,25 @@ fun main() = application {
}
```
### Colormap
Colormap filters operate only on the RED color channel — for example on
depth maps coming from
[orx-depth-camera](https://github.com/openrndr/orx/tree/master/orx-depth-camera).
They allow selection of `min` / `max` value range and applying exponential
shaping `curve` within this range:
- `GrayscaleColormap` - maps to gray tones
- `SpectralZucconiColormap` - maps to natural light dispersion spectrum as described
by Alan Zucconi in the
[Improving the Rainbow](https://www.alanzucconi.com/2017/07/15/improving-the-rainbow/)
article.
- `TurboColormap` - maps to Turbo Colormap according to
[Turbo, An Improved Rainbow Colormap for Visualization](https://ai.googleblog.com/2019/08/turbo-improved-rainbow-colormap-for.html)
by Google.
<!-- __demos__ >
# Demos

View File

@@ -0,0 +1,24 @@
package org.openrndr.extra.fx.colormap
import org.openrndr.draw.Filter
import org.openrndr.extra.fx.mppFilterShader
import org.openrndr.extra.parameters.DoubleParameter
/**
 * Base class of filters mapping values of the RED color channel to a color
 * gradient within the selected [minValue]..[maxValue] range, with exponential
 * shaping [curve] applied inside this range.
 *
 * @param code the fragment shader code of the colormap.
 * @param name the name of the filter shader.
 */
abstract class ColormapFilter(code: String, name: String) : Filter(mppFilterShader(code, name)) {

    @DoubleParameter(label = "min value", low = 0.0, high = 1.0, order = 0)
    var minValue: Double by parameters

    @DoubleParameter(label = "max value", low = 0.0, high = 1.0, order = 1)
    var maxValue: Double by parameters

    // exponent applied to the normalized value; 1.0 means linear mapping
    @DoubleParameter(label = "curve", low = 0.001, high = 10.0, order = 2)
    var curve: Double by parameters

    init {
        // defaults: full 0-1 value range, linear curve
        minValue = 0.0
        maxValue = 1.0
        curve = 1.0
    }
}

View File

@@ -0,0 +1,10 @@
package org.openrndr.extra.fx.colormap
import org.openrndr.extra.fx.fx_grayscale_colormap
import org.openrndr.extra.parameters.Description
/**
 * Maps values of the RED color channel to grayscale.
 *
 * See [ColormapFilter] for the `minValue` / `maxValue` / `curve` parameters.
 */
@Description("grayscale colormap")
class GrayscaleColormap : ColormapFilter(fx_grayscale_colormap, "grayscale-colormap")

View File

@@ -0,0 +1,13 @@
package org.openrndr.extra.fx.colormap
import org.openrndr.extra.fx.fx_spectral_zucconi_colormap
import org.openrndr.extra.parameters.Description
/**
 * Maps values of the RED color channel to natural light dispersion spectrum as described
 * by Alan Zucconi in the
 * [Improving the Rainbow](https://www.alanzucconi.com/2017/07/15/improving-the-rainbow/)
 * article.
 *
 * See [ColormapFilter] for the `minValue` / `maxValue` / `curve` parameters.
 */
@Description("spectral colormap")
class SpectralZucconiColormap : ColormapFilter(fx_spectral_zucconi_colormap, "spectral-zucconi-colormap")

View File

@@ -0,0 +1,12 @@
package org.openrndr.extra.fx.colormap
import org.openrndr.extra.fx.fx_turbo_colormap
import org.openrndr.extra.parameters.Description
/**
 * Maps values of the RED color channel to Turbo Colormap according to
 * [Turbo, An Improved Rainbow Colormap for Visualization](https://ai.googleblog.com/2019/08/turbo-improved-rainbow-colormap-for.html)
 * by Google.
 *
 * See [ColormapFilter] for the `minValue` / `maxValue` / `curve` parameters.
 */
// NOTE(review): declared `open` unlike the sibling colormaps — presumably to
// allow extension by downstream code; confirm this is intentional.
@Description("turbo colormap")
open class TurboColormap : ColormapFilter(fx_turbo_colormap, "turbo-colormap")

View File

@@ -0,0 +1,31 @@
// Grayscale colormap: maps the RED channel of tex0 to gray tones within
// the [minValue, maxValue] range, shaped by an exponential curve.
#ifdef OR_IN_OUT
in vec2 v_texCoord0;
#else
varying vec2 v_texCoord0;
#endif

uniform sampler2D tex0;  // input texture, value read from the RED channel
uniform float minValue;  // lower bound of the mapped range
uniform float maxValue;  // upper bound of the mapped range
uniform float curve;     // exponential shaping curve applied within the range

#ifndef OR_GL_FRAGCOLOR
out vec4 o_color;
#endif

void main() {
    #ifndef OR_GL_TEXTURE2D
    float red = texture(tex0, v_texCoord0).r;
    #else
    float red = texture2D(tex0, v_texCoord0).r;
    #endif
    // normalize to 0-1 within the selected range
    float value = (red - minValue) / (maxValue - minValue);
    // values outside the range render as black
    float inRange = step(value, 1.) * step(0., value);
    // clamp before pow: in GLSL pow(x, y) is undefined for x < 0 and may
    // produce NaN, which would survive multiplication by the zero mask
    vec3 color = vec3(pow(clamp(value, 0., 1.), curve)) * inRange;
    vec4 result = vec4(color, 1.);
    #ifdef OR_GL_FRAGCOLOR
    gl_FragColor = result;
    #else
    o_color = result;
    #endif
}

View File

@@ -1,8 +1,3 @@
#version 330
uniform sampler2D tex0; // kinect raw
out vec3 color;
// Spectral Colour Schemes
// By Alan Zucconi
// Website: www.alanzucconi.com
@@ -19,7 +14,20 @@ out vec3 color;
// Read "Improving the Rainbow" for more information
// http://www.alanzucconi.com/?p=6703
#ifdef OR_IN_OUT
in vec2 v_texCoord0;
#else
varying vec2 v_texCoord0;
#endif
uniform sampler2D tex0; // kinect raw
uniform float minValue;
uniform float maxValue;
uniform float curve;
#ifndef OR_GL_FRAGCOLOR
out vec4 o_color;
#endif
float saturate (float x)
{
@@ -59,10 +67,22 @@ vec3 spectral_zucconi6 (float x)
return
bump3y(c1 * (x - x1), y1) +
bump3y(c2 * (x - x2), y2) ;
bump3y(c2 * (x - x2), y2);
}
void main() {
float depth = texelFetch(tex0, ivec2(int(gl_FragCoord.x), int(gl_FragCoord.y)), 0).r;
color = (depth >= .999) ? vec3(0) : spectral_zucconi6(depth);
#ifndef OR_GL_TEXTURE2D
float red = texture(tex0, v_texCoord0).r;
#else
float red = texture2D(tex0, v_texCoord0).r;
#endif
float value = (red - minValue) / (maxValue - minValue);
vec3 color = spectral_zucconi6(pow(value, curve));
color *= step(value, 1.) * step(0., value);
vec4 result = vec4(color, 1.);
#ifdef OR_GL_FRAGCOLOR
gl_FragColor = result;
#else
o_color = result;
#endif
}

View File

@@ -1,12 +1,4 @@
#version 330
uniform sampler2D tex0;
out vec3 color;
float saturate(in float x) {
return max(0, min(1, x));
}
// TurboColormap
// Copyright 2019 Google LLC.
// SPDX-License-Identifier: Apache-2.0
@@ -17,6 +9,25 @@ float saturate(in float x) {
// Colormap Design: Anton Mikhailov (mikhailov@google.com)
// GLSL Approximation: Ruofei Du (ruofei@google.com)
#ifdef OR_IN_OUT
in vec2 v_texCoord0;
#else
varying vec2 v_texCoord0;
#endif
uniform sampler2D tex0;
uniform float minValue;
uniform float maxValue;
uniform float curve;
#ifndef OR_GL_FRAGCOLOR
out vec4 o_color;
#endif
float saturate(in float x) {
return max(0, min(1, x));
}
vec3 TurboColormap(in float x) {
const vec4 kRedVec4 = vec4(0.13572138, 4.61539260, -42.66032258, 132.13108234);
const vec4 kGreenVec4 = vec4(0.09140261, 2.19418839, 4.84296658, -14.18503333);
@@ -36,6 +47,18 @@ vec3 TurboColormap(in float x) {
}
void main() {
float depth = texelFetch(tex0, ivec2(int(gl_FragCoord.x), int(gl_FragCoord.y)), 0).r;
color = (depth >= .999) ? vec3(0) : TurboColormap(depth);
#ifndef OR_GL_TEXTURE2D
float red = texture(tex0, v_texCoord0).r;
#else
float red = texture2D(tex0, v_texCoord0).r;
#endif
float value = (red - minValue) / (maxValue - minValue);
vec3 color = TurboColormap(pow(value, curve));
color *= step(value, 1.) * step(0., value);
vec4 result = vec4(color, 1.);
#ifdef OR_GL_FRAGCOLOR
gl_FragColor = result;
#else
o_color = result;
#endif
}

View File

@@ -0,0 +1,5 @@
dependencies {
    // expose the common depth camera API and the GUI to consumers of this module
    api(project(":orx-depth-camera"))
    api(project(":orx-jvm:orx-gui"))
    // colormap filters used internally for the calibration preview
    implementation(project(":orx-fx"))
}

View File

@@ -0,0 +1,267 @@
package org.openrndr.extra.depth.camera.calibrator
import org.openrndr.*
import org.openrndr.draw.ColorBuffer
import org.openrndr.draw.Drawer
import org.openrndr.draw.colorBuffer
import org.openrndr.draw.isolated
import org.openrndr.extra.depth.camera.DepthCamera
import org.openrndr.extra.depth.camera.DepthMeasurement
import org.openrndr.extra.fx.colormap.TurboColormap
import org.openrndr.extra.gui.GUI
import org.openrndr.extra.parameters.*
import org.openrndr.math.IntVector2
import org.openrndr.math.Vector2
/**
 * Depth camera calibrator extension.
 *
 * @param program the program using this extension, Note: normally
 *     we would pass program in [setup], however there is a
 *     cyclic dependency between GUI and calibrator, so some
 *     dimensions have to be established before setup.
 *     See Kinect1Demo10DepthCameraCalibration.kt.
 * @param depthCameras depth cameras to calibrate.
 */
class DepthCameraCalibrator(
    private val program: Program,
    vararg depthCameras: DepthCamera
) : Extension {

    init {
        check(depthCameras.isNotEmpty()) {
            "depthCameras cannot be empty"
        }
        // calibration depth ranges are expressed in meters, therefore every
        // camera must already be configured to report depth in METERS
        depthCameras.forEach {
            check(it.depthMeasurement == DepthMeasurement.METERS) {
                "depthCameras: calibration requires depthMeasurement of each camera to be set to METERS"
            }
        }
    }

    // enabling this extension is equivalent to switching the calibrator view on
    override var enabled: Boolean
        get() = commonParameters.calibratorView
        set(value) { commonParameters.calibratorView = value }

    // screen resolution of the hosting program, captured at construction time
    private val resolution = IntVector2(program.width, program.height).vector2

    // one calibration per supplied camera, in the same order
    private val calibrations = depthCameras.map { Calibration(it) }.toList()

    // colormap used to visualize depth frames in the calibrator view
    private val colormap = TurboColormap()

    private var onCalibrationChange: (calibration: Calibration) -> Unit =
        { _ -> } // empty on startup

    override fun setup(program: Program) {
        // keyboard tuning is only active while the calibrator view is enabled
        program.keyboard.keyDown.listen {
            if (enabled) {
                handleKeyDown(it)
            }
        }
    }

    override fun afterDraw(drawer: Drawer, program: Program) {
        // render each camera's current frame through the colormap using the
        // camera's depth range, then draw it with its offset/rotation/scale
        calibrations.forEach {
            colormap.minValue = it.minDepth
            colormap.maxValue = it.maxDepth
            colormap.apply(it.camera.currentFrame, it.colorBuffer)
            drawer.isolatedWithCalibration(it) {
                image(
                    colorBuffer = it.colorBuffer,
                    position = it.position,
                    width = it.width,
                    height = it.height
                )
            }
        }
    }

    // keyboard shortcuts; the shortcut keys are listed in the parameter labels
    fun handleKeyDown(event: KeyEvent) {
        // keys 1-4 adjust the depth range of all cameras at once
        when(event.name) {
            "1" -> commonParameters.allMinDepth -= CENTIMETER
            "2" -> commonParameters.allMinDepth += CENTIMETER
            "3" -> commonParameters.allMaxDepth -= CENTIMETER
            "4" -> commonParameters.allMaxDepth += CENTIMETER
        }
        // the remaining keys act only on cameras with keyboard tuning enabled
        calibrations
            .filter { it.tuneWithKeyboard }
            .forEach {
                when(event.key) {
                    KEY_ARROW_LEFT -> it.offset += Direction.LEFT * OFFSET_CHANGE_SCALE
                    KEY_ARROW_RIGHT -> it.offset += Direction.RIGHT * OFFSET_CHANGE_SCALE
                    KEY_ARROW_UP -> it.offset += Direction.UP * OFFSET_CHANGE_SCALE
                    KEY_ARROW_DOWN -> it.offset += Direction.DOWN * OFFSET_CHANGE_SCALE
                }
                when(event.name) {
                    // NOTE(review): the scale parameter label says "[+/-]" but
                    // the keys handled here are "-" / "=" — confirm intended
                    "-" -> it.scale -= SCALE_CHANGE
                    "=" -> it.scale += SCALE_CHANGE
                    "l" -> it.rotation -= ROTATION_CHANGE
                    "r" -> it.rotation += ROTATION_CHANGE
                    "a" -> it.minDepth -= CENTIMETER
                    "s" -> it.minDepth += CENTIMETER
                    "d" -> it.maxDepth -= CENTIMETER
                    "f" -> it.maxDepth += CENTIMETER
                }
            }
    }

    /**
     * Adds the common parameters and one per-camera calibration
     * compartment to the supplied [gui].
     */
    fun addControlsTo(gui: GUI) {
        gui.add(commonParameters)
        calibrations.forEachIndexed { index, calibration ->
            gui.add(calibration, label = "depth camera $index")
        }
    }

    /**
     * Returns the [Calibration] associated with the given [camera].
     *
     * @throws IllegalArgumentException if the camera was not registered
     *     with this calibrator.
     */
    fun getCalibration(camera: DepthCamera): Calibration = calibrations
        .find { it.camera === camera }
        ?: throw IllegalArgumentException("No calibration for provided depth camera")

    /**
     * Registers a listener invoked whenever any calibration changes.
     * The listener is also invoked immediately once per calibration.
     */
    fun onCalibrationChange(block: (calibration: Calibration) -> Unit) {
        onCalibrationChange = block
        calibrations.forEach { // run on first install
            block(it)
        }
    }

    // parameters applied to all depth cameras at once; setters fan the
    // value out to every per-camera calibration
    private val commonParameters = @Description("calibration: all depth cameras") object {

        @BooleanParameter(label = "calibrator view [k]", order = 0)
        var calibratorView: Boolean = false

        // NOTE(review): default 0.1 is below the annotated low bound 0.2 — confirm
        @DoubleParameter(label = "min depth [1/2]", low = 0.2, high = 10.0, order = 1)
        var allMinDepth: Double = 0.1
            set(value) {
                field = value
                calibrations.forEach {
                    it.minDepth = value
                }
            }

        @DoubleParameter(label = "max depth [3/4]", low = 0.2, high = 10.0, order = 2)
        var allMaxDepth: Double = 10.0
            set(value) {
                field = value
                calibrations.forEach {
                    it.maxDepth = value
                }
            }
    }

    /**
     * Calibration state of a single depth camera. Setters of tunable
     * properties notify the registered [onCalibrationChange] listener.
     */
    @Suppress("unused") // used by reflection
    inner class Calibration(
        val camera: DepthCamera,
        val colorBuffer: ColorBuffer = colorBuffer(
            camera.resolution.x,
            camera.resolution.y
        )
    ) {

        @BooleanParameter(label = "tune with keyboard", order = 0)
        var tuneWithKeyboard: Boolean = true

        // flipH / flipV delegate straight to the underlying camera
        @BooleanParameter(label = "flipH", order = 1)
        var flipH
            get() = camera.flipH
            set(value) { camera.flipH = value }

        @BooleanParameter(label = "flipV", order = 2)
        var flipV
            get() = camera.flipV
            set(value) { camera.flipV = value }

        @XYParameter(
            label = "offset [arrows]",
            minX = -1.0,
            minY = -1.0,
            maxX = 1.0,
            maxY = 1.0,
            order = 3,
            invertY = true
        )
        var offset: Vector2 = Vector2.ZERO
            set(value) {
                field = value
                onCalibrationChange(this)
            }

        @DoubleParameter(label = "rotation [l/r]", low = -360.0, high = 360.0, order = 4)
        var rotation: Double = 0.0
            set(value) {
                field = value
                onCalibrationChange(this)
            }

        @DoubleParameter(label = "scale [+/-]", low = 0.0, high = 10.0, order = 5)
        var scale: Double = 1.0
            set(value) {
                field = value
                onCalibrationChange(this)
            }

        @DoubleParameter(label = "min depth [a/s]", low = 0.0, high = 10.0, order = 6)
        var minDepth: Double = 0.2
            set(value) {
                field = value
                onCalibrationChange(this)
            }

        @DoubleParameter(label = "max depth [d/f]", low = 0.0, high = 10.0, order = 7)
        var maxDepth: Double = 10.0
            set(value) {
                field = value
                onCalibrationChange(this)
            }

        // restores the default calibration values
        @ActionParameter(label = "reset", order = 8)
        fun reset() {
            offset = Vector2.ZERO
            rotation = 0.0
            scale = 1.0
            minDepth = 0.2
            maxDepth = 10.0
        }

        // display width preserving the camera aspect ratio at full screen height
        val width: Double =
            camera.resolution.x * resolution.y /
                camera.resolution.y
        // display height: full screen height
        val height: Double = resolution.y
        // position centering the scaled image horizontally on screen
        val position: Vector2 =
            -(resolution - Vector2(resolution.x - width, 0.0)) / 2.0
    }
}
/**
 * Runs [block] inside an isolated drawer transform derived from [calibration]:
 * a screen-centered translation shifted by the calibration offset, followed
 * by the calibration rotation and scale.
 */
fun Drawer.isolatedWithCalibration(
    calibration: DepthCameraCalibrator.Calibration,
    block: Drawer.() -> Unit
) {
    isolated {
        val center = IntVector2(width, height).vector2 / 2.0
        // the Y component of the offset is inverted before being applied
        val shift = calibration.offset * Vector2(1.0, -1.0) * height.toDouble()
        translate(center + shift)
        rotate(calibration.rotation)
        scale(calibration.scale)
        block()
    }
}
/**
 * Unit direction vectors used when nudging the calibration offset
 * with arrow keys.
 *
 * Note: UP carries positive Y here — presumably the screen-space
 * inversion happens where the offset is applied; confirm.
 */
enum class Direction(val vector: Vector2) {
    LEFT(Vector2(-1.0, 0.0)),
    RIGHT(Vector2(1.0, 0.0)),
    UP(Vector2(0.0, 1.0)),
    DOWN(Vector2(0.0, -1.0));

    /** Scales this direction's unit vector by [scale]. */
    operator fun times(scale: Double): Vector2 = this.vector * scale
}
// step sizes applied per key press during keyboard calibration tuning
private const val CENTIMETER = .01           // depth range step, in meters
private const val OFFSET_CHANGE_SCALE = .001 // offset nudge, in normalized units
private const val ROTATION_CHANGE = .1       // rotation step — presumably degrees; confirm
private const val SCALE_CHANGE = .001        // scale step

View File

@@ -0,0 +1,3 @@
dependencies {
    // expose the common depth camera API to consumers of this module
    api(project(":orx-depth-camera"))
}

View File

@@ -1,105 +1,140 @@
package org.openrndr.extra.kinect
import org.openrndr.Extension
import org.openrndr.draw.*
import org.openrndr.extra.depth.camera.DepthCamera
import org.openrndr.math.IntVector2
import org.openrndr.resourceUrl
import java.lang.RuntimeException
import java.net.URL
import java.nio.ByteBuffer
import java.nio.ByteOrder
import kotlin.reflect.KClass
/**
* Represents all the accessible kinects handled by a specific driver (V1, V2).
*
* @param <CTX> data needed to make low level kinect support calls.
* Represents all the accessible kinects handled by a specific driver (V1, V2, etc.).
*/
interface Kinects<CTX> {
fun countDevices(): Int
interface Kinect {
/**
* Starts kinect device of a given number.
* Lists available kinect devices.
*/
fun listDevices(): List<Device.Info>
/**
* Opens kinect device of a given index.
*
* @param num the kinect device index (starts with 0). If no value specified,
* @param index the kinect device index (starts with 0). If no value specified,
* it will default to 0.
* @throws KinectException if device of such a number does not exist
* (better to count them first), or it was already started.
* @see countDevices
* @throws KinectException if device of such an index does not exist,
* or it was already started.
* @see listDevices
*/
fun startDevice(num: Int = 0): KinectDevice<CTX>
fun openDevice(index: Int = 0): Device
/**
* Executes low level Kinect commands in the kinect thread.
*/
fun <T> execute(commands: (CTX) -> T) : T
}
/**
* Represents specific device.
* Opens kinect device of a given serial number.
*
* @param CTX type of data needed to make low level kinect support calls (e.g. freenect contexts).
* @param serialNumber the kinect device serialNumber.
* @throws KinectException if device of such a serial number does not exist
* , or it was already started.
* @see listDevices
*/
interface KinectDevice<CTX> : Extension {
fun openDevice(serialNumber: String): Device
/**
* The list of kinect devices which are already opened and haven't been closed.
*/
val activeDevices: List<Device>
/**
* Represents physical kinect device.
*/
interface Device {
/**
* Provides information about kinect device.
*
* Note: in implementation it can be extended with any
* additional information next to the serial number.
*/
interface Info {
val serialNumber: String
}
val info: Info
val depthCamera: KinectDepthCamera
/**
* Executes low level Kinect commands in the kinect thread in the context of this device.
*/
fun <T> execute(commands: (CTX) -> T): T
fun close()
}
}
/**
* Generic interface for all the kinect cameras.
*/
interface KinectCamera {
var enabled: Boolean
val width: Int
val height: Int
var mirror: Boolean
val currentFrame: ColorBuffer
/**
* Returns the latest frame, but only once. Useful for the scenarios
* where each new frame triggers extra computation. Therefore the same
* expensive operation might happen only once, especially when the refresh
* rate of the target screen is higher than kinect's 30 fps.
* <p>
* Example usage:
* <pre>
* kinect.depthCamera.getLatestFrame()?.let { frame ->
* grayscaleFilter.apply(frame, grayscaleBuffer)
* }
* </pre>
*/
fun getLatestFrame(): ColorBuffer?
}
interface KinectDepthCamera : KinectCamera {
interface KinectDepthCamera : KinectCamera, DepthCamera {
/* no special attributes at the moment */
}
class KinectException(msg: String) : RuntimeException(msg)
open class KinectException(msg: String) : RuntimeException(msg)
/**
* Maps depth values to grayscale.
*/
class DepthToGrayscaleMapper : Filter(
filterShaderFromUrl(resourceUrl("depth-to-grayscale.frag", Kinects::class))
)
fun kinectRawDepthByteBuffer(resolution: IntVector2): ByteBuffer =
ByteBuffer.allocateDirect(
resolution.x * resolution.y * 2
).also {
it.order(ByteOrder.nativeOrder())
}
/**
* Maps depth values to color map according to natural light dispersion as described
* by Alan Zucconi in the
* <a href="https://www.alanzucconi.com/2017/07/15/improving-the-rainbow/">Improving the Rainbow</a>
* article.
*/
class DepthToColorsZucconi6Mapper : Filter(
filterShaderFromUrl(resourceUrl("depth-to-colors-zucconi6.frag", Kinects::class))
)
fun <T : Kinect> KClass<T>.filterFrom(resource: String, flipH: Boolean, flipV: Boolean): Filter {
val url = resourceUrl(resource, this)
val preamble =
(if (flipH) "#define KINECT_FLIPH\n" else "") +
(if (flipV) "#define KINECT_FLIPV\n" else "")
return Filter(
filterShaderFromCode(
"$preamble\n${URL(url).readText()}",
"kinect-shader: $url + flipH: $flipH, flipV: $flipV"
)
)
}
/**
* Maps depth values to color map according to
* <a href="https://ai.googleblog.com/2019/08/turbo-improved-rainbow-colormap-for.html">
* Turbo, An Improved Rainbow Colormap for Visualization
* </a>
* by Google.
*/
class DepthToColorsTurboMapper : Filter(
filterShaderFromUrl(resourceUrl("depth-to-colors-turbo.frag", Kinects::class))
)
class KinectDepthMappers<T : Kinect>(resource: String, `class`: KClass<T>) {
private val flipHFalseVFalse = `class`.filterFrom(resource, flipH = false, flipV = false)
private val flipHFalseVTrue = `class`.filterFrom(resource, flipH = false, flipV = true)
private val flipHTrueVFalse = `class`.filterFrom(resource, flipH = true, flipV = false)
private val flipHTrueVTrue = `class`.filterFrom(resource, flipH = true, flipV = true)
fun select(flipH: Boolean, flipV: Boolean): Filter =
if (flipH) {
if (flipV) flipHTrueVTrue
else flipHTrueVFalse
} else {
if (flipV) flipHFalseVTrue
else flipHFalseVFalse
}
fun update(resolution: IntVector2) {
val resolutionXMinus1 = resolution.x - 1
flipHTrueVFalse.parameters["resolutionXMinus1"] = resolutionXMinus1
flipHTrueVTrue.parameters["resolutionXMinus1"] = resolutionXMinus1
}
fun forEach(block: (filter: Filter) -> Unit) {
block(flipHFalseVFalse)
block(flipHFalseVTrue)
block(flipHTrueVFalse)
block(flipHTrueVTrue)
}
}
fun depthToRawNormalizedMappers() = KinectDepthMappers("depth-to-raw-normalized.frag", Kinect::class)

View File

@@ -1,154 +0,0 @@
package org.openrndr.extra.kinect.impl
import org.openrndr.Program
import org.openrndr.draw.*
import org.openrndr.extra.kinect.KinectDepthCamera
import org.openrndr.extra.kinect.KinectDevice
import org.openrndr.extra.kinect.Kinects
import org.openrndr.math.Vector2
import org.openrndr.resourceUrl
import java.nio.ByteBuffer
import java.util.concurrent.atomic.AtomicReference
import java.util.function.Supplier
class DefaultKinects<CTX>(
private val program: Program,
private val manager: KinectsManager<CTX>
) : Kinects<CTX> {
private inner class Destroyer : Thread() {
override fun run() {
manager.shutdown()
}
}
init {
manager.initialize()
// as we don't have explicit shutdown mechanism in OPENRNDR
// we need to install a shutdown hook for now
Runtime.getRuntime().addShutdownHook(Destroyer())
}
override fun countDevices(): Int {
return manager.countDevices()
}
override fun startDevice(num: Int): KinectDevice<CTX> {
val device = manager.startDevice(num)
program.extend(device)
return device
}
override fun <T> execute(commands: (CTX) -> T): T {
return manager.execute(commands)
}
}
interface KinectsManager<CTX> {
fun initialize()
fun countDevices(): Int
fun startDevice(num: Int): KinectDevice<CTX>
fun <T> execute(commands: (CTX) -> T): T
fun shutdown()
}
interface KinectFeatureEnabler {
var enabled: Boolean
}
interface KinectCommandsExecutor<CTX> {
fun <T> execute(commands: (CTX) -> T): T
}
class DefaultKinectDevice<CTX>(
override val depthCamera: DefaultKinectDepthCamera,
private val commandsExecutor: KinectCommandsExecutor<CTX>
) : KinectDevice<CTX> {
override var enabled: Boolean = true
override fun beforeDraw(drawer: Drawer, program: Program) {
depthCamera.update()
}
override fun <T> execute(commands: (CTX) -> T): T {
return commandsExecutor.execute(commands)
}
}
class DefaultKinectDepthCamera(
override val width: Int,
override val height: Int,
depthScale: Double,
private val enabler: KinectFeatureEnabler,
private val bytesSupplier: Supplier<ByteBuffer?>
) :
KinectDepthCamera, UpdatableKinectCamera {
override var enabled: Boolean
get() = enabler.enabled
set(value) {
enabler.enabled = value
}
private val rawBuffer: ColorBuffer = colorBuffer(
width,
height,
format = ColorFormat.R,
type = ColorType.UINT16 // it would be perfect if we could use isampler in the shader
)
override val currentFrame: ColorBuffer = colorBuffer(
width,
height,
format = ColorFormat.R,
type = ColorType.FLOAT16 // in the future we might want to choose the precision here
)
private val depthMapper = KinectRawDataToDepthMapper()
init {
depthMapper.depthScale = depthScale
depthMapper.mirror = false
depthMapper.resolution = Vector2(width.toDouble(), height.toDouble())
}
private val newFrameRef = AtomicReference<ColorBuffer>()
override fun getLatestFrame(): ColorBuffer? {
return newFrameRef.getAndSet(null)
}
override fun update() {
if (enabled) {
bytesSupplier.get()?.let { bytes ->
rawBuffer.write(bytes)
depthMapper.apply(rawBuffer, currentFrame)
newFrameRef.set(currentFrame)
}
}
}
override var mirror: Boolean
get() = depthMapper.mirror
set(value) {
depthMapper.mirror = value
}
}
private class KinectRawDataToDepthMapper :
Filter(
filterShaderFromUrl(
resourceUrl(
"kinect-raw-to-depth.frag",
DefaultKinects::class
)
)
) {
var depthScale: Double by parameters
var mirror: Boolean by parameters
var resolution: Vector2 by parameters
}
private interface UpdatableKinectCamera {
fun update()
}

View File

@@ -1,9 +0,0 @@
#version 330
uniform sampler2D tex0;
out vec3 color;
void main() {
float depth = texelFetch(tex0, ivec2(int(gl_FragCoord.x), int(gl_FragCoord.y)), 0).r;
color = (depth >= .999) ? vec3(0) : vec3(depth);
}

View File

@@ -0,0 +1,19 @@
// Converts raw integer kinect depth into a normalized float depth.
// KINECT_FLIPV / KINECT_FLIPH may be defined in a preamble to flip the image.
#ifndef KINECT_FLIPV
// by default sample with the origin in the upper-left corner;
// leaving this out (KINECT_FLIPV defined) flips the image vertically
layout(origin_upper_left) in vec4 gl_FragCoord;
#endif
uniform usampler2D tex0; // kinect raw
uniform float maxDepthValue; // divisor used to normalize the raw value
#ifdef KINECT_FLIPH
uniform int resolutionXMinus1; // width - 1, used to mirror horizontally
#endif
// NOTE(review): raw depth divided by maxDepthValue — whether this is meters
// or a 0-1 normalization depends on the supplied maxDepthValue; confirm
out float outDepth;
void main() {
    ivec2 uv = ivec2(gl_FragCoord);
#ifdef KINECT_FLIPH
    // mirror the x coordinate
    uv = ivec2(resolutionXMinus1 - uv.x, uv.y);
#endif
    uint uintDepth = texelFetch(tex0, uv, 0).r;
    outDepth = float(uintDepth) / maxDepthValue;
}

View File

@@ -1,16 +0,0 @@
#version 330
uniform sampler2D tex0; // kinect raw
uniform vec2 resolution; // kinect resolution
uniform float depthScale; // 32 for kinect1, 64 for kinect2
uniform bool mirror;
out float depth;
void main() {
ivec2 uv = ivec2(
mirror ? int(resolution.x) - 1 - int(gl_FragCoord.x) : int(gl_FragCoord.x),
int(resolution.y) - 1 - int(gl_FragCoord.y)
);
depth = texelFetch(tex0, uv, 0).r * depthScale;
}

View File

@@ -1,5 +1,8 @@
dependencies {
implementation(project(":orx-jvm:orx-kinect-v1"))
implementation(project(":orx-jvm:orx-depth-camera-calibrator"))
implementation(project(":orx-fx"))
implementation(project(":orx-jvm:orx-gui"))
runtimeOnly(project(":orx-jvm:orx-kinect-v1-${(gradle as ExtensionAware).extra["openrndrClassifier"]}"))
runtimeOnly(libs.openrndr.gl3.core)
runtimeOnly(libs.openrndr.gl3.natives)

View File

@@ -1,73 +0,0 @@
package org.openrndr.extra.kinect.v1.demo
import org.openrndr.application
import org.openrndr.draw.ColorBuffer
import org.openrndr.draw.ColorFormat
import org.openrndr.draw.colorBuffer
import org.openrndr.extra.kinect.*
import org.openrndr.extra.kinect.v1.getKinectsV1
/**
* Shows 4 different representations of the depth map.
* <ol>
* <li>the original depth map stored as RED channel values</li>
* <li>the same values expressed as gray tones</li>
* <li>
* color map according to natural light dispersion as described
* by Alan Zucconi in the
* <a href="https://www.alanzucconi.com/2017/07/15/improving-the-rainbow/">Improving the Rainbow</a>
* article.
* </li>
* <li>
* color map according to
* <a href="https://ai.googleblog.com/2019/08/turbo-improved-rainbow-colormap-for.html">
* Turbo, An Improved Rainbow Colormap for Visualization
* </a>
* by Google.
* </li>
* </ol>
*
* @see DepthToGrayscaleMapper
* @see DepthToColorsZucconi6Mapper
* @see DepthToColorsTurboMapper
*/
fun main() = application {
configure {
width = 2 * 640
height = 2 * 480
}
program {
val kinects = getKinectsV1(this)
val kinect = kinects.startDevice()
kinect.depthCamera.enabled = true
kinect.depthCamera.mirror = true
val camera = kinect.depthCamera
val grayscaleFilter = DepthToGrayscaleMapper()
val zucconiFilter = DepthToColorsZucconi6Mapper()
val turboFilter = DepthToColorsTurboMapper()
val grayscaleBuffer = kinectColorBuffer(camera)
val zucconiBuffer = kinectColorBuffer(camera)
val turboBuffer = kinectColorBuffer(camera)
extend {
/*
* Note: getting the latest frame this way will guarantee
* that filters are being applied only if the actual new frame
* from kinect was received. Kinect has different refresh rate
* than usual screen (30 fps).
*/
kinect.depthCamera.getLatestFrame()?.let { frame ->
grayscaleFilter.apply(frame, grayscaleBuffer)
zucconiFilter.apply(frame, zucconiBuffer)
turboFilter.apply(frame, turboBuffer)
}
drawer.image(camera.currentFrame)
drawer.image(grayscaleBuffer, camera.width.toDouble(), 0.0)
drawer.image(turboBuffer, 0.0, camera.height.toDouble())
drawer.image(zucconiBuffer, camera.width.toDouble(), camera.height.toDouble())
}
}
}
private fun kinectColorBuffer(camera: KinectCamera): ColorBuffer {
return colorBuffer(camera.width, camera.height, format = ColorFormat.RGB)
}

View File

@@ -1,10 +1,10 @@
package org.openrndr.extra.kinect.v1.demo
import org.openrndr.application
import org.openrndr.extra.kinect.v1.getKinectsV1
import org.openrndr.extra.kinect.v1.Kinect1
/**
* Basic kinect use case showing continuous stream from the depth camera.
* Basic kinect1 use case showing continuous stream from the depth camera.
*
* Note: kinect depth map is stored only on the RED color channel to save
* space. Therefore depth map is displayed only in the red tones.
@@ -15,12 +15,12 @@ fun main() = application {
height = 480
}
program {
val kinects = getKinectsV1(this)
val kinect = kinects.startDevice()
kinect.depthCamera.enabled = true
kinect.depthCamera.mirror = true
val kinect = extend(Kinect1())
val device = kinect.openDevice()
device.depthCamera.flipH = true // to make a mirror
device.depthCamera.enabled = true
extend {
drawer.image(kinect.depthCamera.currentFrame)
drawer.image(device.depthCamera.currentFrame)
}
}
}

View File

@@ -0,0 +1,111 @@
package org.openrndr.extra.kinect.v1.demo
import org.openrndr.application
import org.openrndr.draw.colorBuffer
import org.openrndr.extra.depth.camera.DepthMeasurement
import org.openrndr.extra.fx.colormap.TurboColormap
import org.openrndr.extra.gui.GUI
import org.openrndr.extra.kinect.v1.Kinect1
import org.openrndr.extra.parameters.BooleanParameter
import org.openrndr.extra.parameters.DoubleParameter
/**
 * A use case where "virtual walls" can be established within certain
 * depth ranges. Useful for actual installations, like interactive
 * projections in the form of a "mirror" for the human silhouette.
 * The measurement in meters helps in calibration.
 */
fun main() = application {
    configure { // default resolution of the Kinect v1 depth camera
        width = 640
        height = 480
    }
    program {
        val kinect = extend(Kinect1())
        val device = kinect.openDevice()
        val camera = device.depthCamera
        camera.flipH = true // to make a mirror
        // depth expressed in meters so the range below reads as distances
        camera.depthMeasurement = DepthMeasurement.METERS
        val turboColormap = TurboColormap().apply {
            minValue = .5
            maxValue = 5.0
            curve = 1.0
        }
        val outputBuffer = colorBuffer(
            camera.resolution.x,
            camera.resolution.y
        )
        /*
         * Note: the code specified in onFrameReceived will be executed as soon as
         * possible, also when GPU is idle.
         *
         * Also TurboColormap filter will be applied only after actual new frame
         * from kinect is received instead of being applied for each
         * program frame. Kinect has different refresh rate (30 fps) than usual
         * display.
         */
        camera.onFrameReceived { frame ->
            turboColormap.apply(frame, outputBuffer)
        }
        camera.enabled = true
        // GUI-facing settings; properties delegate to the camera and colormap
        @Suppress("unused")
        val settings = object {
            @BooleanParameter(label = "enabled", order = 0)
            var enabled: Boolean
                get() = camera.enabled
                set(value) {
                    camera.enabled = value
                }
            @BooleanParameter(label = "flipH", order = 1)
            var flipH: Boolean
                get() = camera.flipH
                set(value) {
                    camera.flipH = value
                }
            @BooleanParameter(label = "flipV", order = 2)
            var flipV: Boolean
                get() = camera.flipV
                set(value) {
                    camera.flipV = value
                }
            /*
             Note: we could use turboColormap parameters directly in the GUI, however the
             high range is capped to 1.0 there, and we want to use calibration in meters.
             Increase 5.0 to something higher if you are calibrating for a bigger space.
             */
            @DoubleParameter(label = "min distance", order = 3, low = 0.2, high = 5.0)
            var minDistance: Double
                get() = turboColormap.minValue
                set(value) {
                    turboColormap.minValue = value
                }
            @DoubleParameter(label = "max distance", order = 4, low = 0.2, high = 5.0)
            var maxDistance: Double
                get() = turboColormap.maxValue
                set(value) { turboColormap.maxValue = value }
            @DoubleParameter(label = "distance curve", order = 5, low = 0.01, high = 10.0)
            var curve: Double
                get() = turboColormap.curve
                set(value) {
                    turboColormap.curve = value
                }
        }
        extend(GUI()) {
            persistState = false
            compartmentsCollapsedByDefault = false
            add(settings, label = "depth camera")
        }
        extend {
            drawer.image(outputBuffer)
        }
    }
}

View File

@@ -0,0 +1,93 @@
package org.openrndr.extra.kinect.v1.demo
import org.openrndr.application
import org.openrndr.draw.ColorFormat
import org.openrndr.draw.colorBuffer
import org.openrndr.extra.fx.colormap.GrayscaleColormap
import org.openrndr.extra.fx.colormap.SpectralZucconiColormap
import org.openrndr.extra.fx.colormap.TurboColormap
import org.openrndr.extra.gui.GUI
import org.openrndr.extra.kinect.v1.Kinect1
import org.openrndr.extra.parameters.BooleanParameter
import org.openrndr.math.Vector2
/**
 * Shows 4 different color representations of the depth map:
 *
 * * the original depth map stored as RED channel values
 * * the same values expressed as gray tones
 * * zucconi6 color map according to natural light dispersion as described
 *   by Alan Zucconi in
 *   [Improving the Rainbow](https://www.alanzucconi.com/2017/07/15/improving-the-rainbow/)
 *   article
 * * turbo color map according to
 *   [Turbo, An Improved Rainbow Colormap for Visualization](https://ai.googleblog.com/2019/08/turbo-improved-rainbow-colormap-for.html)
 *   by Google.
 *
 * Note: the values are normalized in range 0-1, not in meters.
 * @see GrayscaleColormap
 * @see SpectralZucconiColormap
 * @see TurboColormap
 */
fun main() = application {
    val guiOffset = 200 // horizontal space reserved for the GUI panel
    configure {
        // 2x2 grid of camera-sized views plus the GUI column
        width = 2 * 640 + guiOffset
        height = 2 * 480
    }
    program {
        val kinect = extend(Kinect1())
        val device = kinect.openDevice()
        val camera = device.depthCamera
        // helper producing an RGB buffer matching the depth camera resolution
        fun outputBuffer() = colorBuffer(
            camera.resolution.x,
            camera.resolution.y,
            format = ColorFormat.RGB
        )
        val grayscaleColormap = GrayscaleColormap()
        val spectralZucconiColormap = SpectralZucconiColormap()
        val turboColormap = TurboColormap()
        val grayscaleBuffer = outputBuffer()
        val zucconiBuffer = outputBuffer()
        val turboBuffer = outputBuffer()
        // anonymous holder exposing camera state as GUI parameters
        @Suppress("unused")
        val settings = object {
            @BooleanParameter(label = "enabled", order = 0)
            var enabled: Boolean
                get() = camera.enabled
                set(value) { camera.enabled = value }
            @BooleanParameter(label = "flipH", order = 1)
            var flipH: Boolean
                get() = camera.flipH
                set(value) { camera.flipH = value }
            @BooleanParameter(label = "flipV", order = 2)
            var flipV: Boolean
                get() = camera.flipV
                set(value) { camera.flipV = value }
        }
        // colormaps are applied per received kinect frame (~30 fps),
        // not per display frame
        camera.onFrameReceived { frame ->
            grayscaleColormap.apply(frame, grayscaleBuffer)
            spectralZucconiColormap.apply(frame, zucconiBuffer)
            turboColormap.apply(frame, turboBuffer)
        }
        camera.enabled = true
        extend(GUI()) {
            persistState = false
            compartmentsCollapsedByDefault = false
            add(settings, label = "depth camera")
            add(grayscaleColormap)
            add(spectralZucconiColormap)
            add(turboColormap)
        }
        extend {
            // 2x2 layout: raw | grayscale / turbo | zucconi
            drawer.image(camera.currentFrame, guiOffset.toDouble(), 0.0)
            drawer.image(grayscaleBuffer, guiOffset + camera.resolution.x.toDouble(), 0.0)
            drawer.image(turboBuffer, guiOffset.toDouble(), camera.resolution.y.toDouble())
            drawer.image(zucconiBuffer, Vector2(guiOffset.toDouble(), 0.0) + camera.resolution.vector2)
        }
    }
}

View File

@@ -0,0 +1,31 @@
package org.openrndr.extra.kinect.v1.demo
import org.bytedeco.libfreenect.global.freenect
import org.openrndr.application
import org.openrndr.extra.kinect.v1.Kinect1
/**
 * Demonstrates issuing raw freenect commands, either globally or per
 * device: here the device LED is switched off entirely, which can suit
 * the aesthetics of an installation, while a lit LED may still serve as
 * a useful indicator during development.
 */
fun main() = application {
    configure { // Kinect v1 depth camera native resolution
        width = 640
        height = 480
    }
    program {
        val kinect = extend(Kinect1())
        val device = kinect.openDevice()
        // low level freenect call, executed on the kinect runner thread
        device.executeInFreenectDeviceContext("turn off led") { _, _, dev ->
            freenect.freenect_set_led(dev, freenect.LED_OFF)
        }
        val camera = device.depthCamera
        camera.enabled = true
        extend {
            drawer.image(camera.currentFrame)
        }
    }
}

View File

@@ -0,0 +1,32 @@
package org.openrndr.extra.kinect.v1.demo
import org.openrndr.application
import org.openrndr.extra.kinect.v1.Kinect1
/**
 * Renders the depth streams of two kinect1 devices next to each other.
 */
fun main() = application {
    configure {
        width = 640 * 2
        height = 480
    }
    program {
        val kinect = extend(Kinect1())
        /*
        For a production system consider opening devices by their stable
        serial numbers rather than by index, so that devices already
        installed in physical space cannot be reordered.
        */
        val cameras = listOf(
            kinect.openDevice(0).depthCamera,
            kinect.openDevice(1).depthCamera
        )
        cameras.forEach { camera ->
            camera.enabled = true
            camera.flipH = true
        }
        extend {
            drawer.image(cameras[0].currentFrame)
            drawer.image(cameras[1].currentFrame, cameras[0].resolution.x.toDouble(), 0.0)
        }
    }
}

View File

@@ -0,0 +1,44 @@
package org.openrndr.extra.kinect.v1.demo
import org.bytedeco.libfreenect.global.freenect
import org.bytedeco.libfreenect.global.freenect.*
import org.openrndr.application
import org.openrndr.extra.kinect.v1.Kinect1
/**
 * Even though this library is abstracting freenect access, it is still
 * possible to call any low level kinect API through execute methods.
 * The calls are executed in separate kinect runner thread but they will
 * block the calling thread until the result is returned.
 */
fun main() = application {
    program {
        val kinect = extend(Kinect1())
        /*
        Blocking version will wait for the result, specifying the name
        makes it easier to identify this call in logs when it is finally
        executed on kinect. Note: enabling TRACE log level is required.
        */
        val numberOfKinectDevices = kinect.executeInFreenectContextBlocking(
            name = "numberOfKinectDevices"
        ) { ctx, _ ->
            freenect.freenect_num_devices(ctx)
        }
        println("numberOfKinectDevices: $numberOfKinectDevices")
        val device = kinect.openDevice()
        val maxTilt = 90.0
        var tilt = 0.0 // last tilt angle sent to the motor
        extend {
            device.executeInFreenectDeviceContext("disco LED") { _, _, dev ->
                freenect_set_led(dev, (seconds * 10).toInt() % 7) // disco
            }
            // alternate between the extreme tilt angles every 5 seconds
            val newTilt = if ((seconds % 10) < 5) -maxTilt else maxTilt
            if (tilt != newTilt) {
                device.executeInFreenectDeviceContext("tilt change") { _, _, dev ->
                    // fix: send the NEW angle — previously the stale `tilt`
                    // value was passed, so the motor lagged one transition
                    // behind the intended position
                    freenect_set_tilt_degs(dev, newTilt)
                }
                tilt = newTilt
            }
        }
    }
}

View File

@@ -0,0 +1,30 @@
package org.openrndr.extra.kinect.v1.demo
import org.openrndr.application
import org.openrndr.extra.kinect.v1.Kinect1
/**
 * Demonstrates the freenect FLOOD log level in action.
 *
 * Note: technically the kinect log could be redirected to an slf4j
 * logger inside [Kinect1], but that callback was removed and logs stay
 * on standard out: FLOOD gets so noisy that the native-to-JVM round
 * trip with conversion into [String] for JVM logging could kill
 * performance and end in a stack overflow exception.
 */
fun main() = application {
    configure { // Kinect v1 depth camera native resolution
        width = 640
        height = 480
    }
    program {
        val kinect = extend(Kinect1()).apply {
            logLevel = Kinect1.LogLevel.FLOOD
        }
        val camera = kinect.openDevice().depthCamera
        camera.enabled = true
        extend {
            drawer.image(camera.currentFrame)
        }
    }
}

View File

@@ -0,0 +1,49 @@
package org.openrndr.extra.kinect.v1.demo
import org.openrndr.application
import org.openrndr.draw.Filter
import org.openrndr.draw.colorBuffer
import org.openrndr.draw.filterShaderFromCode
import org.openrndr.extra.depth.camera.DepthMeasurement
import org.openrndr.extra.kinect.v1.Kinect1
/**
 * Raw kinect values can be interpreted with completely custom logic
 * while keeping all the performance characteristics.
 *
 * Note: flip options have no effect while depth measurement is set to RAW.
 */
fun main() = application {
    configure { // Kinect v1 depth camera native resolution
        width = 640
        height = 480
    }
    program {
        val kinect = extend(Kinect1())
        val camera = kinect.openDevice().depthCamera
        // RAW exposes unprocessed values through an unsigned integer texture
        camera.depthMeasurement = DepthMeasurement.RAW
        // custom interpretation of the raw 11-bit depth values
        val rawDepthFilter = Filter(filterShaderFromCode("""
            layout(origin_upper_left) in vec4 gl_FragCoord;
            uniform usampler2D tex0; // kinect raw
            out vec4 o_color;
            void main() {
                ivec2 uv = ivec2(gl_FragCoord);
                uint uintDepth = texelFetch(tex0, uv, 0).r;
                float depth = float(uintDepth) / 2047.;
                o_color = vec4(vec3(depth), 1.);
            }
            """.trimIndent(),
            "raw filter")
        )
        val outputBuffer = colorBuffer(camera.resolution.x, camera.resolution.y)
        // applied once per received kinect frame, not per display frame
        camera.onFrameReceived { frame ->
            rawDepthFilter.apply(frame, outputBuffer)
        }
        camera.enabled = true
        extend {
            drawer.image(outputBuffer)
        }
    }
}

View File

@@ -0,0 +1,117 @@
package org.openrndr.extra.kinect.v1.demo
import org.openrndr.Fullscreen
import org.openrndr.application
import org.openrndr.draw.Filter
import org.openrndr.draw.colorBuffer
import org.openrndr.draw.filterShaderFromCode
import org.openrndr.extra.depth.camera.DepthMeasurement
import org.openrndr.extra.depth.camera.calibrator.DepthCameraCalibrator
import org.openrndr.extra.depth.camera.calibrator.isolatedWithCalibration
import org.openrndr.extra.gui.GUI
import org.openrndr.extra.kinect.v1.Kinect1
/**
 * How to use [DepthCameraCalibrator] with [Kinect1]?
 */
fun main() = application {
    configure {
        fullscreen = Fullscreen.CURRENT_DISPLAY_MODE
    }
    program {
        val kinect = extend(Kinect1())
        val device = kinect.openDevice()
        val camera = device.depthCamera
        // depth measurement in meters is required by the calibrator
        camera.depthMeasurement = DepthMeasurement.METERS
        val kinectResolution = camera.resolution
        val outputBuffer = colorBuffer(
            kinectResolution.x,
            kinectResolution.y
        )
        // simple visual effect applied to kinect data
        val spaceRangeExtractor = SpaceRangeExtractor()
        // runs once per received kinect frame (~30 fps), not per display frame
        camera.onFrameReceived { frame ->
            spaceRangeExtractor.apply(frame, outputBuffer)
        }
        val calibrator = DepthCameraCalibrator(this, camera)
        val gui = GUI()
        calibrator.addControlsTo(gui)
        /*
        Note: remember that extend(gui) has to be called after all the parameter
        controls are added.
        Also extensions are rendered in reverse order, if we start with gui,
        it will not be covered by calibrator view when calibrator is enabled
        */
        extend(gui)
        /*
        if it's an interactive installation, probably we don't want to
        show GUI on startup. It can be shown by pressing F11.
        */
        gui.visible = false
        /*
        Registering this callback here after gui will prevent it from
        being triggered multiple times when GUI parameters are restored
        on startup.
        */
        calibrator.onCalibrationChange { calibration ->
            spaceRangeExtractor.minDepth = calibration.minDepth
            spaceRangeExtractor.maxDepth = calibration.maxDepth
        }
        extend(calibrator)
        camera.enabled = true
        extend {
            // draw the processed frame transformed by the current calibration
            val calibration = calibrator.getCalibration(camera)
            drawer.isolatedWithCalibration(calibration) {
                image(
                    colorBuffer = outputBuffer,
                    position = calibration.position,
                    width = calibration.width,
                    height = calibration.height
                )
            }
        }
        // switching calibrator view on and off with keyboard
        program.keyboard.keyDown.listen {
            if (it.name == "k") {
                calibrator.enabled = !calibrator.enabled
            }
        }
    }
}
/**
 * A visual effect applied to kinect data in this demonstration.
 * Everything is black, except for the white pixels within the range
 * of 2 virtual walls positioned at [minDepth] at front of the
 * viewer and [maxDepth] behind the viewer.
 *
 * Both bounds are expressed in the unit of the incoming frame (meters
 * when the camera runs with [DepthMeasurement.METERS]).
 *
 * NOTE(review): unlike the RAW-demo filter, this shader does not
 * redeclare gl_FragCoord with origin_upper_left — confirm the intended
 * vertical orientation.
 */
class SpaceRangeExtractor : Filter(filterShaderFromCode("""
    uniform sampler2D tex0; // kinect raw
    uniform float minDepth;
    uniform float maxDepth;
    out vec4 o_color;
    void main() {
        ivec2 uv = ivec2(gl_FragCoord.xy);
        float depth = texelFetch(tex0, uv, 0).r;
        float luma = ((depth >= minDepth) && (depth <= maxDepth)) ? 1.0 : 0.0;
        o_color = vec4(vec3(luma), 1.0);
    }
    """.trimIndent(),
    "space range extractor"
)) {
    // lower bound of the accepted depth range, delegated to a shader uniform
    var minDepth: Double by parameters
    // upper bound of the accepted depth range, delegated to a shader uniform
    var maxDepth: Double by parameters
}

View File

@@ -1,33 +0,0 @@
package org.openrndr.extra.kinect.v1.demo
import org.openrndr.application
import org.openrndr.extra.kinect.v1.getKinectsV1
/**
 * Stream from 2 kinects side by side.
 *
 * NOTE(review): this demo uses the pre-refactoring kinect API
 * (getKinectsV1 / startDevice / mirror) that this commit removes.
 */
fun main() = application {
    configure {
        width = 640 * 2
        height = 480
    }
    program {
        val kinects = getKinectsV1(this)
        val depthCamera1 = kinects.startDevice(0).depthCamera
        val depthCamera2 = kinects.startDevice(1).depthCamera
        depthCamera1.enabled = true
        depthCamera1.mirror = true
        depthCamera2.enabled = true
        depthCamera2.mirror = true
        extend {
            // second stream rendered to the right of the first one
            drawer.image(depthCamera1.currentFrame)
            drawer.image(depthCamera2.currentFrame, depthCamera1.width.toDouble(), 0.0)
        }
        // runtime toggles: 1/2 enable each camera, q/w mirror each camera
        keyboard.keyDown.listen { keyEvent ->
            if (keyEvent.name == "1") {depthCamera1.enabled = !depthCamera1.enabled }
            if (keyEvent.name == "2") {depthCamera2.enabled = !depthCamera2.enabled }
            if (keyEvent.name == "q") {depthCamera1.mirror = !depthCamera1.mirror }
            if (keyEvent.name == "w") {depthCamera2.mirror = !depthCamera2.mirror }
        }
    }
}

View File

@@ -1,43 +0,0 @@
package org.openrndr.extra.kinect.v1.demo
import org.bytedeco.libfreenect.global.freenect
import org.bytedeco.libfreenect.global.freenect.*
import org.openrndr.application
import org.openrndr.extra.kinect.v1.getKinectsV1
import java.lang.RuntimeException
/**
 * Even though this library is abstracting freenect access, it is still
 * possible to call any low level kinect API through execute methods.
 * The calls are executed in separate kinect runner thread but they will
 * block the calling thread until the result is returned.
 *
 * NOTE(review): this demo uses the pre-refactoring kinect API
 * (getKinectsV1 / execute) that this commit removes.
 */
fun main() = application {
    program {
        val kinects = getKinectsV1(this)
        // the same as calling kinects.countDevices(), here to show that any value might be returned from execute
        val num = kinects.execute { ctx -> freenect_num_devices(ctx.fnCtx) }
        if (num == 0) { throw RuntimeException("no kinect detected") }
        kinects.execute { ctx ->
            freenect_set_log_level(ctx.fnCtx, freenect.FREENECT_LOG_FLOOD) // lots of logs
        }
        kinects.execute { ctx ->
            // extra FREENECT_DEVICE_MOTOR gives control over tilt and LEDs
            // NOTE(review): `xor` of flag constants — presumably disjoint bits,
            // making it equivalent to `or`, which would be the conventional operator
            freenect_select_subdevices(ctx.fnCtx, FREENECT_DEVICE_CAMERA xor FREENECT_DEVICE_MOTOR)
        }
        val kinect = kinects.startDevice()
        var tilt = 90.0 // last tilt angle sent to the motor
        extend {
            kinect.execute { ctx ->
                freenect_set_led(ctx.fnDev, (seconds * 10).toInt() % 7) // disco
            }
            // alternate between the extreme tilt angles every 5 seconds
            val currentTilt = if ((seconds % 10) < 5) -90.0 else 90.0
            if (currentTilt != tilt) {
                kinect.execute { ctx ->
                    freenect_set_tilt_degs(ctx.fnDev, currentTilt)
                }
                tilt = currentTilt
            }
        }
    }
}

View File

@@ -0,0 +1,562 @@
package org.openrndr.extra.kinect.v1
import kotlinx.coroutines.Job
import kotlinx.coroutines.flow.*
import mu.KotlinLogging
import org.bytedeco.javacpp.Pointer
import org.bytedeco.libfreenect.*
import org.bytedeco.libfreenect.global.freenect.*
import org.openrndr.Extension
import org.openrndr.Program
import org.openrndr.draw.*
import org.openrndr.extra.depth.camera.DepthMeasurement
import org.openrndr.extra.kinect.*
import org.openrndr.launch
import org.openrndr.math.IntVector2
import java.util.*
import java.util.concurrent.*
import kotlin.concurrent.thread
/** [KinectException] specific to kinect1 / freenect errors (see [Freenect.checkReturn]). */
class Kinect1Exception(msg: String) : KinectException(msg)
/**
 * OPENRNDR [Extension] adding Kinect version 1 support on top of the
 * freenect library. Devices are opened with [openDevice]; every native
 * freenect call is funneled through the single runner thread owned by
 * [Freenect].
 */
class Kinect1 : Kinect, Extension {

    // Extension contract: when false, setup() and shutdown() do nothing.
    override var enabled: Boolean = true

    /**
     * Without the delay between starting depth camera and
     * registering depth callback, no frames are transferred
     * at all. However this problem happens only on the first
     * try with freshly connected kinect.
     * Subsequent runs of the same program don't require
     * this delay at all.
     */
    private var cameraInitializationDelay: Long = 100

    /** Identifies a physical kinect1 device by its camera [serialNumber]. */
    class DeviceInfo(
        override val serialNumber: String,
    ) : Kinect.Device.Info {
        override fun toString(): String {
            return "Kinect1[$serialNumber]"
        }
    }

    /**
     * Log level for native freenect logging.
     *
     * Note: logs will appear on standard out for performance reasons.
     *
     * @param code the code of corresponding freenect log level.
     * @see Kinect1.logLevel
     */
    @Suppress("unused")
    enum class LogLevel(val code: Int) {
        /** Crashing/non-recoverable errors. */
        FATAL(FREENECT_LOG_FATAL),
        /** Major errors. */
        ERROR(FREENECT_LOG_ERROR),
        /** Warning messages. */
        WARNING(FREENECT_LOG_WARNING),
        /** Important messages. */
        NOTICE(FREENECT_LOG_NOTICE),
        /** Log for normal messages. */
        INFO(FREENECT_LOG_INFO),
        /** Log for useful development messages. */
        DEBUG(FREENECT_LOG_DEBUG),
        /** Log for slightly less useful messages. */
        SPEW(FREENECT_LOG_SPEW),
        /** Log EVERYTHING. May slow performance. */
        FLOOD(FREENECT_LOG_FLOOD);
    }

    /**
     * Kinect native log level, defaults to `INFO`.
     *
     * NOTE(review): delegates to [freenect], which is only initialized in
     * [setup] — accessing this property before the extension is set up
     * will throw; confirm this is the intended contract.
     */
    var logLevel: LogLevel
        get() = freenect.logLevel
        set(value) { freenect.logLevel = value }

    private val logger = KotlinLogging.logger {}

    // both initialized in setup()
    private lateinit var program: Program
    private lateinit var freenect: Freenect

    // Extension lifecycle: starts the freenect runner thread.
    override fun setup(program: Program) {
        if (!enabled) { return }
        logger.info("Starting Kinect1 support")
        this.program = program
        freenect = Freenect(initialLogLevel = LogLevel.INFO)
    }

    /** Lists connected kinect1 devices. Blocks until the runner thread answers. */
    override fun listDevices(): List<DeviceInfo> = freenect.callBlocking(
        "listDevices"
    ) { _, _ ->
        freenect.listDevices()
    }

    /**
     * Opens the device at [index] (in discovery order). Blocking.
     *
     * @throws KinectException if no devices are present or [index] is out of range.
     */
    override fun openDevice(index: Int): V1Device {
        val result = freenect.callBlocking("openDeviceByIndex") { ctx, _ ->
            val devices = freenect.listDevices()
            if (devices.isEmpty()) {
                throw KinectException("No kinect devices detected, cannot open any")
            } else if (index >= devices.size) {
                throw KinectException("Invalid device index, number of kinect1 devices: ${devices.size}")
            }
            Pair(
                openFreenectDevice(
                    ctx,
                    devices[index].serialNumber
                ),
                devices[index]
            )
        }
        val device = V1Device(result.first, result.second)
        mutableActiveDevices.add(device)
        return device
    }

    /** Opens the device with the given camera [serialNumber]. Blocking. */
    override fun openDevice(serialNumber: String): V1Device {
        val dev = freenect.callBlocking("openDeviceBySerial") { ctx, _ ->
            openFreenectDevice(ctx, serialNumber)
        }
        val device = V1Device(dev, DeviceInfo(serialNumber))
        mutableActiveDevices.add(device)
        return device
    }

    private val mutableActiveDevices = LinkedList<V1Device>()

    // devices opened and not yet closed
    override val activeDevices: List<Kinect.Device>
        get() = mutableActiveDevices

    // must be called from a block already running on the freenect runner thread
    private fun openFreenectDevice(
        ctx: freenect_context,
        serialNumber: String,
    ): freenect_device {
        val dev = freenect_device()
        freenect.checkReturn(
            freenect_open_device_by_camera_serial(ctx, dev, serialNumber)
        )
        return dev
    }

    // Extension lifecycle: closes open devices and stops the runner thread.
    override fun shutdown(program: Program) {
        if (!enabled) { return }
        logger.info { "Shutting down Kinect1 support" }
        logger.debug("Closing active devices, count: ${mutableActiveDevices.size}")
        mutableActiveDevices.forEach {
            it.close()
        }
        mutableActiveDevices.clear()
        freenect.close()
    }

    /** Enqueues [block] on the freenect runner thread without waiting for completion. */
    @Suppress("unused")
    fun executeInFreenectContext(
        name: String,
        block: (ctx: freenect_context, usbCtx: freenect_usb_context) -> Unit
    ) {
        freenect.call(name) { ctx, usbCtx ->
            block(ctx, usbCtx)
        }
    }

    /** Runs [block] on the freenect runner thread, blocking until it returns [T]. */
    fun <T> executeInFreenectContextBlocking(
        name: String,
        block: (ctx: freenect_context, usbCtx: freenect_usb_context) -> T
    ): T = freenect.callBlocking(name) { ctx, usbCtx ->
        block(ctx, usbCtx)
    }

    /** An opened kinect1 device wrapping the native [dev] handle. */
    inner class V1Device(
        private val dev: freenect_device,
        override val info: DeviceInfo
    ) : Kinect.Device {

        /** Depth camera of this device, streaming frames at [resolution]. */
        inner class V1DepthCamera(
            override val resolution: IntVector2,
        ) : KinectDepthCamera {

            private var firstStart = true
            private var started = false

            // double buffering: freenect fills bytesFront while the render
            // thread uploads the previously completed buffer
            private var bytesFront = kinectRawDepthByteBuffer(resolution)
            private var bytesBack = kinectRawDepthByteBuffer(resolution)

            // NOTE(review): StateFlow conflates — if frames arrive faster than
            // the render-thread collector consumes them, intermediate frames
            // are dropped (latest wins); confirm this is acceptable.
            private val bytesFlow = MutableStateFlow(bytesBack) // the first frame will come from bytesFront

            // raw 11-bit depth values as an unsigned 16-bit single-channel texture
            private val rawBuffer = colorBuffer(
                resolution.x,
                resolution.y,
                format = ColorFormat.R,
                type = ColorType.UINT16_INT
            ).also {
                it.filter(MinifyingFilter.NEAREST, MagnifyingFilter.NEAREST)
            }

            // depth values converted according to depthMeasurement
            private val processedFrameBuffer = colorBuffer(
                resolution.x,
                resolution.y,
                format = ColorFormat.R,
                type = ColorType.FLOAT16 // in the future we might want to choose the precision here
            ).also {
                it.filter(MinifyingFilter.LINEAR, MagnifyingFilter.LINEAR)
            }

            private var mutableCurrentFrame = processedFrameBuffer

            private val depthMappers = Kinect1DepthMappers().apply {
                update(resolution)
            }

            override val currentFrame get() = mutableCurrentFrame

            private var onFrameReceived: (frame: ColorBuffer) -> Unit = {}

            // working on rendering thread
            private val frameReceiverJob: Job = program.launch {
                bytesFlow.collect { bytes ->
                    rawBuffer.write(bytes)
                    // null mapper means RAW mode: the raw buffer IS the frame
                    depthMappers.mapper?.apply(rawBuffer, processedFrameBuffer)
                    onFrameReceived(mutableCurrentFrame)
                }
            }

            // invoked by freenect on the runner thread when a frame is complete
            private val freenectDepthCallback = object : freenect_depth_cb() {
                override fun call(
                    dev: freenect_device,
                    depth: Pointer,
                    timestamp: Int
                ) {
                    // publish the completed buffer, swap front/back, then hand
                    // the other buffer to freenect for the next frame
                    bytesFlow.tryEmit(bytesFront)
                    val bytesTmp = bytesBack
                    bytesBack = bytesFront
                    bytesFront = bytesTmp
                    freenect.checkReturn(
                        freenect_set_depth_buffer(dev, Pointer(bytesFront))
                    )
                }
            }

            // starts/stops the depth stream on the runner thread; idempotent
            override var enabled: Boolean = false
                set(value) {
                    logger.debug { "$info.enabled = $value" }
                    if (value == field) {
                        logger.debug { "$info.enabled: doing nothing, already in state: $value" }
                        return
                    }
                    field = value
                    freenect.call("$info.enabled = $value") { _, _ ->
                        if (value) start() else stop()
                    }
                }

            // RAW exposes rawBuffer directly, other modes the processed buffer
            override var depthMeasurement: DepthMeasurement
                get() = depthMappers.depthMeasurement
                set(value) {
                    logger.debug { "$info.depthMeasurement = $value" }
                    depthMappers.depthMeasurement = value
                    mutableCurrentFrame =
                        if (value == DepthMeasurement.RAW) rawBuffer
                        else processedFrameBuffer
                }

            override var flipH: Boolean
                get() = depthMappers.flipH
                set(value) {
                    logger.debug { "$info.flipH = $value" }
                    depthMappers.flipH = value
                }

            override var flipV: Boolean
                get() = depthMappers.flipV
                set(value) {
                    logger.debug { "$info.flipV = $value" }
                    depthMappers.flipV = value
                }

            // runner thread only
            private fun start() {
                logger.info { "$info.start()" }
                // medium resolution, 11-bit depth mode
                freenect.checkReturn(freenect_set_depth_mode(
                    dev, freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_DEPTH_11BIT))
                )
                freenect.checkReturn(freenect_set_depth_buffer(dev, Pointer(bytesFront)))
                freenect.checkReturn(freenect_start_depth(dev))
                if (firstStart) { // workaround, see comments above
                    Thread.sleep(cameraInitializationDelay)
                    firstStart = false
                }
                freenect_set_depth_callback(dev, freenectDepthCallback)
                started = true
                freenect.expectingEvents = true
            }

            // runner thread only
            internal fun stop() {
                logger.info { "$info.stop()" }
                if (started) {
                    freenect.expectingEvents = false
                    freenect.checkReturn(freenect_stop_depth(dev))
                    started = false
                } else {
                    logger.warn { "$info.stop(): cannot stop already stopped depth camera" }
                }
            }

            // stops collecting frames on the rendering thread
            internal fun close() {
                frameReceiverJob.cancel()
            }

            /** Registers [block] to run on the rendering thread for each new frame. */
            override fun onFrameReceived(block: (frame: ColorBuffer) -> Unit) {
                onFrameReceived = block
            }
        }

        override val depthCamera: V1DepthCamera = V1DepthCamera(
            resolution = KINECT1_DEPTH_RESOLUTION
        )

        /** Runs [block] with this device's native handle on the runner thread. */
        fun executeInFreenectDeviceContext(
            name: String,
            block: (ctx: freenect_context, usbCtx: freenect_usb_context, dev: freenect_device) -> Unit
        ) {
            freenect.call("$info: $name") { ctx, usbCtx ->
                block(ctx, usbCtx, dev)
            }
        }

        /** Blocking variant of [executeInFreenectDeviceContext] returning [T]. */
        @Suppress("unused")
        fun <T> executeInFreenectDeviceContextBlocking(
            name: String,
            block: (ctx: freenect_context, usbCtx: freenect_usb_context, dev: freenect_device) -> T
        ): T = freenect.callBlocking("$info: $name") { ctx, usbCtx ->
            block(ctx, usbCtx, dev)
        }

        /** Stops streaming, closes the native device and removes it from [activeDevices]. */
        override fun close() {
            logger.info { "$info.close()" }
            freenect.callBlocking("$info.closeDevice") { _, _ ->
                depthCamera.stop()
                freenect.checkReturn(freenect_close_device(dev))
                mutableActiveDevices.remove(this)
            }
            depthCamera.close()
        }
    }
}
/**
 * This class provides a low level API for accessing a kinect1 device.
 * All the operations are executed in a single thread responsible for calling
 * freenect API.
 *
 * @param initialLogLevel the log level to use when freenect is initialized.
 */
class Freenect(private val initialLogLevel: Kinect1.LogLevel) {

    private val logger = KotlinLogging.logger {}

    /** Native freenect log level; setting it enqueues the change on the runner thread. */
    var logLevel: Kinect1.LogLevel = initialLogLevel
        set(value) {
            call("logLevel[$value]") { ctx, _ ->
                freenect_set_log_level(ctx, value.code)
            }
            field = value
        }

    // true while at least one camera is streaming; switches the main loop
    // from queue-waiting mode into freenect event-pumping mode
    internal var expectingEvents: Boolean = false

    private val ctx = freenect_context()
    private val usbCtx = freenect_usb_context()

    // fix: marked @Volatile — it is written by close() from another thread
    // and read in the runner loop, so the store must be visible there
    @Volatile
    private var running: Boolean = true

    /*
     * Tasks to execute on the runner thread.
     * fix: declared BEFORE the runner thread below — the thread starts
     * immediately and reads this queue, so it must already be initialized
     * (previously it was declared after the thread, leaving a window for
     * a NullPointerException during construction).
     */
    private val freenectCallQueue = LinkedBlockingDeque<FutureTask<*>>()

    // the single thread owning all freenect calls
    private val runner = thread(name = "kinect1", start = true) {
        logger.info("Starting Kinect1 thread")
        checkReturn(freenect_init(ctx, usbCtx))
        freenect_set_log_level(ctx, logLevel.code)
        val num = checkReturn(freenect_num_devices(ctx))
        if (num == 0) {
            logger.warn { "Could not find any Kinect1 devices, calling openDevice() will throw exception" }
        } else {
            val devices = listDevices()
            logger.info { "Kinect1 detected, device count: ${devices.size}" }
            devices.forEachIndexed { index, info ->
                logger.info { " |-[$index]: ${info.serialNumber}" }
            }
        }
        while (running) {
            if (expectingEvents) {
                // pump freenect events, then drain all pending tasks
                val ret = freenect_process_events(ctx)
                if (ret != 0) {
                    logger.error { "freenect_process_events returned non-zero value: $ret" }
                }
                val tasks = freenectCallQueue.iterator()
                for (task in tasks) {
                    tasks.remove()
                    task.run()
                }
            } else {
                // fix: wait with a timeout instead of busy-spinning —
                // pollFirst() without arguments returns immediately, which
                // made this loop consume a full CPU core while idle
                freenectCallQueue.pollFirst(100, TimeUnit.MILLISECONDS)?.run()
            }
        }
        checkReturn(freenect_shutdown(ctx))
    }

    /**
     * Enqueues [block] for execution on the runner thread and returns
     * immediately. Exceptions are logged, not propagated to the caller.
     */
    fun call(
        name: String,
        block: (
            ctx: freenect_context,
            usbCtx: freenect_usb_context
        ) -> Unit
    ) {
        logger.debug { "'$name' requested (non-blocking)" }
        val task = FutureTask {
            logger.trace { "'$name': started" }
            try {
                block(ctx, usbCtx)
                logger.trace { "'$name': ended" }
            } catch (e: Exception) {
                logger.error("'$name': failed", e)
            }
        }
        freenectCallQueue.add(task)
    }

    /**
     * Enqueues [block] on the runner thread and blocks the calling thread
     * until its result is available; rethrows any exception it raised.
     */
    fun <T> callBlocking(
        name: String,
        block: (
            ctx: freenect_context,
            usbCtx: freenect_usb_context
        ) -> T
    ): T {
        logger.debug { "'$name' requested (blocking)" }
        val task = FutureTask {
            logger.trace { "'$name': started" }
            try {
                val result = block(ctx, usbCtx)
                logger.trace { "'$name': ended" }
                Result.success(result)
            } catch (e: Exception) {
                logger.error("'$name': failed", e)
                Result.failure(e)
            }
        }
        freenectCallQueue.add(task)
        val result = task.get()
        logger.trace { "'$name': returned result" }
        return result.getOrThrow()
    }

    /** Enumerates attached devices; must run on the runner thread. */
    fun listDevices() : List<Kinect1.DeviceInfo> {
        val attributes = freenect_device_attributes()
        freenect_list_device_attributes(ctx, attributes)
        try {
            val devices = buildList {
                var item: freenect_device_attributes? =
                    if (attributes.isNull) null
                    else attributes
                while (item != null) {
                    val serialNumber = item.camera_serial().string
                    add(Kinect1.DeviceInfo(serialNumber))
                    item = item.next()
                }
            }
            return devices
        } finally {
            // always release the natively allocated attribute list
            if (!attributes.isNull) {
                freenect_free_device_attributes(attributes)
            }
        }
    }

    /** Stops the runner thread and waits for freenect shutdown to complete. */
    fun close() {
        logger.debug("Closing Kinect1 runner")
        running = false
        logger.debug("Waiting for runner thread to finish")
        runner.join()
    }

    /** Returns [ret] if non-negative, otherwise throws [Kinect1Exception]. */
    fun checkReturn(ret: Int): Int =
        if (ret >= 0) ret
        else {
            throw Kinect1Exception("Freenect error: ret=$ret")
        }
}
// FREENECT_DEPTH_11BIT mode (see V1DepthCamera.start) yields raw values in [0, 2047]
internal const val KINECT1_MAX_DEPTH_VALUE: Double = 2047.0
// resolution of the FREENECT_RESOLUTION_MEDIUM depth stream used by this backend
internal val KINECT1_DEPTH_RESOLUTION: IntVector2 = IntVector2(640, 480)
/**
 * Selects the [Filter] used to convert raw kinect1 depth frames according
 * to the requested [depthMeasurement] and the [flipH]/[flipV] flags.
 */
internal class Kinect1DepthMappers {

    // normalizes raw values against the maximal 11-bit depth value
    private val depthToRawNormalized = depthToRawNormalizedMappers().apply {
        forEach {
            it.parameters["maxDepthValue"] = KINECT1_MAX_DEPTH_VALUE
        }
    }

    private val depthToMeters = KinectDepthMappers(
        "kinect1-depth-to-meters.frag",
        Kinect1::class
    )

    // each setter re-selects the active mapper so changes take effect immediately
    var depthMeasurement: DepthMeasurement = DepthMeasurement.RAW_NORMALIZED
        set(value) {
            field = value
            selectMapper()
        }

    var flipH: Boolean = false
        set(value) {
            field = value
            selectMapper()
        }

    var flipV: Boolean = false
        set(value) {
            field = value
            selectMapper()
        }

    var mapperState: Filter? = depthToRawNormalized.select(
        flipH = false,
        flipV = false
    )

    // null in RAW mode: raw frames are passed through without any mapping
    val mapper: Filter? get() = mapperState

    /** Updates all mappers for the given camera [resolution]. */
    fun update(resolution: IntVector2) {
        depthToRawNormalized.update(resolution)
        depthToMeters.update(resolution)
    }

    // picks the filter matching the current measurement mode and flip flags
    private fun selectMapper() {
        mapperState = when (depthMeasurement) {
            DepthMeasurement.RAW -> null
            DepthMeasurement.RAW_NORMALIZED -> {
                depthToRawNormalized.select(flipH, flipV)
            }
            DepthMeasurement.METERS -> {
                depthToMeters.select(flipH, flipV)
            }
        }
    }
}

View File

@@ -1,289 +0,0 @@
package org.openrndr.extra.kinect.v1
import mu.KotlinLogging
import org.bytedeco.javacpp.Pointer
import org.bytedeco.libfreenect.*
import org.bytedeco.libfreenect.global.freenect.*
import org.bytedeco.libfreenect.presets.freenect
import org.openrndr.Program
import org.openrndr.extra.kinect.*
import org.openrndr.extra.kinect.impl.*
import java.nio.ByteBuffer
import java.nio.ByteOrder
import java.util.*
import java.util.concurrent.*
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.atomic.AtomicReference
import java.util.function.Supplier
import kotlin.concurrent.thread
/**
 * Returns support for kinect version 1.
 *
 * NOTE(review): pre-refactoring entry point; this commit replaces it with
 * the Kinect1 extension.
 *
 * @param depthCameraInitializationDelay defaults to 100 ms. Delay seems to be
 *        necessary due to either my misunderstanding or some weird freenect bug.
 *        Without the delay between starting depth camera and registering
 *        depth callback, no frames are transferred at all. However this
 *        problem happens only on the first try with freshly connected
 *        kinect. Subsequent runs of the same program don't require
 *        this delay at all.
 */
fun getKinectsV1(program: Program, depthCameraInitializationDelay: Long = 100) : Kinects<Freenect> {
    return DefaultKinects(program, KinectsV1Manager(depthCameraInitializationDelay))
}
/** Provides low level freenect context for calling native freenect methods. */
class Freenect(
    val fnCtx: freenect_context,        // global freenect context
    val fnUsbCtx: freenect_usb_context, // usb context paired with fnCtx
    val fnDev: freenect_device? = null  // only available for device level commands
)
/**
 * Kinect1 (Xbox 360 Kinect) support backed by the native libfreenect bindings.
 *
 * All native freenect calls are funneled through one dedicated daemon thread
 * ("Kinect1-runner"): callers submit work via [callSync], which enqueues a
 * [FutureTask] on [taskQueue] and blocks until the runner thread executes it.
 * NOTE(review): presumably the freenect context is not safe to use from
 * multiple threads, hence the single-thread funneling — confirm against
 * libfreenect documentation.
 *
 * @param depthCameraInitializationDelay delay in milliseconds applied after
 *   starting the depth stream and before registering the depth callback
 *   (see the "here is the hack" note in `FreenectDepthCamera.start`).
 */
private class KinectsV1Manager(val depthCameraInitializationDelay: Long) : KinectsManager<Freenect> {
    private val logger = KotlinLogging.logger {}
    // native freenect context handles; initialized on the runner thread in initializeFreenect()
    private val fnCtx = freenect_context()
    private val fnUsbCtx = freenect_usb_context()
    // context-level handle (no device) used for context-scoped commands
    private val ctx = Freenect(fnCtx, fnUsbCtx)
    // work submitted from other threads, drained and executed by the runner thread
    private var taskQueue = LinkedBlockingDeque<FutureTask<*>>()
    // runner-loop flag; flipped to false (on the runner thread itself) in shutdown()
    private var running = true
    // the only thread allowed to touch the native freenect API
    private val runner = thread(
        name = "Kinect1-runner",
        start = false, // started explicitly in initialize()
        isDaemon = true
    ) {
        initializeFreenect()
        while (running) { mainLoop() }
        shutdownFreenect()
    }
    // true when at least one device stream is enabled and native events must be pumped;
    // read/written only on the runner thread (via callSync)
    private var expectingEvents = false
    private val devices: LinkedList<FreenectDevice> = LinkedList()
    // NOTE(review): a 1-second timeval is prepared here but never passed to
    // freenect_process_events in this class — possibly intended for
    // freenect_process_events_timeout; verify whether it is dead state
    private val timeout = freenect.timeval()
    init { timeout.tv_sec(1) }
    // executes arbitrary native commands on the runner thread against the given context
    private inner class KinectV1CommandsExecutor(val context: Freenect): KinectCommandsExecutor<Freenect> {
        override fun <T> execute(commands: (Freenect) -> T): T {
            return callSync {
                logger.trace { "executing native freenect commands" }
                commands(context)
            }
        }
    }
    private val commandsExecutor = KinectV1CommandsExecutor(ctx)
    override fun initialize() {
        logger.info("Initializing Kinect1 support, set log level to TRACE to see received frames")
        runner.start()
    }
    // runs on the runner thread: sets up the native context and logs if no device is present
    private fun initializeFreenect() {
        logger.debug("initializing freenect")
        verify(freenect_init(fnCtx, fnUsbCtx))
        freenect_set_log_level(fnCtx, FREENECT_LOG_INFO)
        // only the camera subdevice is selected (no motor / audio)
        freenect_select_subdevices(fnCtx, FREENECT_DEVICE_CAMERA)
        val num = verify(freenect_num_devices(fnCtx))
        if (num == 0) {
            logger.warn { "Could not find any Kinect1 device, calling startDevice() will throw exception" }
        }
    }
    // one iteration of the runner loop: pump native events while streams are active,
    // otherwise just wait (up to 100 ms) for a submitted task to avoid busy-spinning
    private fun mainLoop() {
        if (expectingEvents) {
            val ret = freenect_process_events(fnCtx)
            if (ret != 0) { logger.error { "freenect_process_events returned non-zero value: $ret" } }
            // drain all pending tasks between event-processing calls
            val tasks = taskQueue.iterator()
            for (task in tasks) {
                tasks.remove()
                task.run()
            }
        } else {
            taskQueue.poll(100, TimeUnit.MILLISECONDS)?.run()
        }
    }
    // runs on the runner thread after the loop exits: closes devices, then the context
    private fun shutdownFreenect() {
        logger.debug("shutting down freenect")
        if (!fnCtx.isNull) {
            devices.forEach { device -> device.shutdown() }
            devices.clear()
            verifyOnShutdown(freenect_shutdown(fnCtx))
        }
    }
    override fun countDevices(): Int {
        return callSync { verify(freenect_num_devices(fnCtx)) }
    }
    override fun startDevice(num: Int): KinectDevice<Freenect> {
        // reject a double-start of the same device index
        callSync {
            devices.find { device -> device.num == num }
        }?.let {
            throw KinectException("Kinect1 device already started, num: $num")
        }
        val count = countDevices()
        if (num >= count) {
            throw KinectException(
                "Trying to start non-existent Kinect1 device, " +
                        "device count: $count, num: $num (index starts with 0)"
            )
        }
        // open the device on the runner thread and register it for shutdown/bookkeeping
        val device = callSync {
            val device = FreenectDevice(num)
            devices.add(device)
            device
        }
        return DefaultKinectDevice(
            DefaultKinectDepthCamera(
                device.depthCamera.width,
                device.depthCamera.height,
                // NOTE(review): semantics of 32.0 are not visible in this file
                // (frame rate?) — confirm against DefaultKinectDepthCamera
                32.0,
                device.depthCamera.enabler,
                device.depthCamera.bytesSupplier
            ),
            KinectV1CommandsExecutor(device.devCtx)
        )
    }
    override fun <T> execute(commands: (Freenect) -> T): T {
        return commandsExecutor.execute(commands)
    }
    override fun shutdown() {
        logger.info("Shutting down Kinect1 support")
        // flip the flag on the runner thread so the loop exits after the current iteration,
        // then wait for shutdownFreenect() to complete
        callSync { running = false }
        runner.join()
    }
    // submits block to the runner thread and blocks the caller until it completes,
    // propagating the result (or any thrown exception, via FutureTask.get) back
    private inline fun <T> callSync(crossinline block: () -> T): T {
        val task = FutureTask<T>(Callable { block() })
        taskQueue.add(task)
        return task.get()
    }
    // a single opened freenect device and its streams (depth camera only, for now)
    private inner class FreenectDevice(val num: Int) {
        val depthCamera = FreenectDepthCamera()
        val fnDev = freenect_device()
        // device-level context handle handed to device-scoped commands
        val devCtx = Freenect(fnCtx, fnUsbCtx, fnDev)
        init {
            logger.info { "Opening Kinect1 device num: $num" }
            verify(freenect_open_device(fnCtx, fnDev, num))
        }
        val expectingEvents: Boolean
            get() = depthCamera.expectingEvents // or other device in the future
        fun shutdown() {
            logger.info { "Shutting down Kinect1 device num: $num" }
            if (!fnDev.isNull) {
                verifyOnShutdown(freenect_stop_depth(fnDev))
                verifyOnShutdown(freenect_close_device(fnDev))
            }
        }
        inner class FreenectDepthCamera {
            // FREENECT_RESOLUTION_MEDIUM frame size (see start())
            val width: Int = 640
            val height: Int = 480
            // native frame buffer: 2 bytes per pixel (11-bit depth mode, see start())
            private val bytes = ByteBuffer.allocateDirect(width * height * 2)
            init { bytes.order(ByteOrder.nativeOrder()) }
            // holds the buffer between "frame received" (runner thread, callback below)
            // and "frame consumed" (bytesSupplier, usually another thread)
            private val currentBytesRef = AtomicReference<ByteBuffer?>()
            private val freenectDepthCb = object : freenect_depth_cb() {
                override fun call(dev: freenect_device?, depth: Pointer?, timestamp: Int) {
                    logger.trace { "depth frame received for Kinect1 device: $num, at: $timestamp" }
                    currentBytesRef.set(bytes)
                }
            }
            // hands out a received frame at most once (getAndSet(null)); returns null
            // when no new frame arrived since the last call
            val bytesSupplier = Supplier<ByteBuffer?> { currentBytesRef.getAndSet(null) }
            val enabler = object : KinectFeatureEnabler {
                private val atomicEnabled = AtomicBoolean(false)
                // guards against overlapping enable/disable requests
                private val inProgress = AtomicBoolean(false)
                override var enabled // usually called from rendering thread
                    get() = atomicEnabled.get()
                    set(value) {
                        if (atomicEnabled.get() == value) {
                            logger.warn { "Current state requested - doing nothing, Kinect1 device: $num, enabled=$value" }
                            return
                        }
                        if (!inProgress.getAndSet(true)) {
                            if (value) {
                                callSync {
                                    try {
                                        start()
                                        atomicEnabled.set(true)
                                        updateExpectingEvents()
                                    } finally { inProgress.set(false) }
                                }
                            } else {
                                callSync {
                                    try {
                                        stop()
                                        atomicEnabled.set(false)
                                        updateExpectingEvents()
                                    } finally { inProgress.set(false) }
                                }
                            }
                        } else {
                            logger.warn { "Operation in progress, Kinect1 device: $num, requested enabled=$value" }
                        }
                    }
            }
            val expectingEvents: Boolean
                get() = depthCamera.enabler.enabled
            // runs on the runner thread (always reached via callSync)
            private fun start() {
                logger.info { "Enabling Kinect1 depth camera, device num: $num" }
                verify(freenect_set_depth_mode(
                    fnDev, freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_DEPTH_11BIT))
                )
                verify(freenect_set_depth_buffer(fnDev, Pointer(bytes)))
                verify(freenect_start_depth(fnDev))
                // NOTE(review): callback registration is deliberately delayed after
                // starting the stream — presumably to skip invalid initial frames;
                // confirm the reason before removing
                Thread.sleep(depthCameraInitializationDelay) // here is the hack
                freenect_set_depth_callback(fnDev, freenectDepthCb)
            }
            // runs on the runner thread (always reached via callSync)
            private fun stop() {
                logger.info { "Disabling Kinect1 depth camera, device num: $num" }
                verify(freenect_stop_depth(fnDev))
            }
        }
    }
    // recompute the runner-loop event-pumping flag; must be called on the runner thread
    private fun updateExpectingEvents() {
        expectingEvents = devices.any { device -> device.expectingEvents }
    }
    // shutdown-path errors are logged instead of thrown so that cleanup can continue
    private fun verifyOnShutdown(ret: Int) {
        if (ret != 0) {
            logger.error { "Unexpected return value while shutting down Kinect1 support: $ret" }
        }
    }
    // freenect convention: a negative return value indicates an error
    private fun verify(ret: Int): Int {
        if (ret < 0) {
            throw KinectException("Kinect1 error: ret=$ret")
        }
        return ret
    }
}

View File

@@ -0,0 +1,23 @@
// Converts a raw Kinect1 depth frame (unsigned integer texture) to depth in meters.
#ifndef KINECT_FLIPV
// when vertical flip is NOT requested, sample with the origin in the upper-left
// corner — NOTE(review): presumably compensating for the camera frame orientation
// versus GL's default lower-left origin; confirm intended orientation
layout(origin_upper_left) in vec4 gl_FragCoord;
#endif
uniform usampler2D tex0; // kinect raw
#ifdef KINECT_FLIPH
uniform int resolutionXMinus1; // frame width - 1, used to mirror the x coordinate
#endif
out float outDepth; // measured in meters
// 2047 = 2^11 - 1, the maximum value of the 11-bit depth mode; treated as "no reading"
const uint UINT_MAX_KINECT_DEPTH = 2047u;
void main() {
    ivec2 uv = ivec2(gl_FragCoord);
#ifdef KINECT_FLIPH
    uv = ivec2(resolutionXMinus1 - uv.x, uv.y); // mirror horizontally
#endif
    // exact texel read, no filtering (integer texture)
    uint uintDepth = texelFetch(tex0, uv, 0).r;
    float depth = float(uintDepth);
    // raw value -> meters; NOTE(review): presumably the common empirical Kinect v1
    // disparity-to-depth approximation — confirm the constants against the
    // calibration source; sentinel value maps to 0.0 (no reading)
    outDepth = (uintDepth < UINT_MAX_KINECT_DEPTH)
        ? 1.0 / (depth * -0.0030711016 + 3.3309495161)
        : 0.0;
}

View File

@@ -178,6 +178,8 @@ include(
"orx-jvm:orx-kinect-v1-natives-macos",
"orx-jvm:orx-kinect-v1-natives-windows",
"orx-jvm:orx-kinect-v1-demo",
"orx-jvm:orx-video-profiles"
"orx-jvm:orx-video-profiles",
"orx-depth-camera",
"orx-jvm:orx-depth-camera-calibrator"
)
)