Merge branch 'master' of github.com:openrndr/orx

 Conflicts:
	build.gradle
This commit is contained in:
Edwin Jakobs
2020-06-10 21:48:45 +02:00
6 changed files with 103 additions and 7 deletions

View File

@@ -14,7 +14,7 @@ buildscript {
apply plugin: 'org.jetbrains.dokka'
project.ext {
openrndrVersion = "0.3.43-rc.10"
openrndrVersion = "0.3.43-rc.11"
kotlinVersion = "1.3.72"
spekVersion = "2.0.10"
libfreenectVersion = "0.5.7-1.5.3"

View File

@@ -13,9 +13,12 @@ def boofcvVersion = "0.34"
dependencies {
demoImplementation(project(":orx-mesh-generators"))
demoImplementation(project(":orx-camera"))
demoImplementation(project(":orx-parameters"))
demoImplementation("org.openrndr:openrndr-core:$openrndrVersion")
demoImplementation("org.openrndr:openrndr-extensions:$openrndrVersion")
demoImplementation("org.openrndr:openrndr-ffmpeg:$openrndrVersion")
demoRuntimeOnly("org.openrndr:openrndr-ffmpeg-natives-$openrndrOS:$openrndrVersion")
demoRuntimeOnly("org.openrndr:openrndr-gl3:$openrndrVersion")
demoRuntimeOnly("org.openrndr:openrndr-gl3-natives-$openrndrOS:$openrndrVersion")
demoImplementation(sourceSets.getByName("main").output)

View File

@@ -0,0 +1,44 @@
import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.DepthTestPass
import org.openrndr.draw.DrawPrimitive
import org.openrndr.draw.isolated
import org.openrndr.draw.shadeStyle
import org.openrndr.extras.meshgenerators.boxMesh
import org.openrndr.ffmpeg.VideoPlayerFFMPEG
import org.openrndr.math.Vector3
fun main() {
    application {
        program {
            // Unit cube that the captured screen will be projected onto.
            val box = boxMesh()

            // Grab the desktop at a modest rate and resolution.
            val grabber = VideoPlayerFFMPEG.fromScreen(
                frameRate = 15.0,
                imageWidth = 300,
                imageHeight = 300
            )
            grabber.play()

            extend {
                grabber.draw(drawer, true) // update the screen grabber
                drawer.isolated {
                    clear(ColorRGBa.WHITE)
                    perspective(60.0, width * 1.0 / height, 0.01, 1000.0)
                    depthWrite = true
                    depthTestPass = DepthTestPass.LESS_OR_EQUAL
                    shadeStyle = shadeStyle {
                        // Mirror horizontally so the capture reads correctly on the cube face.
                        fragmentTransform = "x_fill = texture(p_tex, vec2(1.0-va_texCoord0.x, va_texCoord0.y));"
                        grabber.colorBuffer?.let { parameter("tex", it) }
                    }
                    // Transform order matters: orient, push back, spin, then scale.
                    rotate(Vector3.UNIT_Z, 90.0)
                    translate(0.0, 0.0, -120.0)
                    rotate(Vector3.UNIT_X, seconds * 10)
                    scale(90.0)
                    vertexBuffer(box, DrawPrimitive.TRIANGLES)
                }
            }
        }
    }
}

View File

@@ -54,8 +54,3 @@ Supported Gltf features
[source code](src/demo/kotlin/DemoScene03.kt)
![DemoScene03Kt](https://raw.githubusercontent.com/openrndr/orx/media/orx-dnk3/images/DemoScene03Kt.png)
### DemoSkinning01
[source code](src/demo/kotlin/DemoSkinning01.kt)
![DemoSkinning01Kt](https://raw.githubusercontent.com/openrndr/orx/media/orx-dnk3/images/DemoSkinning01Kt.png)

View File

@@ -0,0 +1,44 @@
import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.isolatedWithTarget
import org.openrndr.draw.loadImage
import org.openrndr.draw.renderTarget
import org.openrndr.extra.runway.*
/**
* This demonstrates the body estimation model of PoseNet
* This example requires a `runway/PoseNet` model active in Runway.
*/
/**
 * This demonstrates the body estimation model of PoseNet.
 * This example requires a `runway/PoseNet` model active in Runway.
 */
fun main() = application {
    configure {
        width = 512
        height = 512
    }
    program {
        // Off-screen target used to letterbox the source image onto the
        // 512x512 canvas that is sent to the model.
        val rt = renderTarget(512, 512) {
            colorBuffer()
        }
        val image = loadImage("demo-data/images/peopleCity01.jpg")
        drawer.isolatedWithTarget(rt) {
            drawer.ortho(rt)
            drawer.clear(ColorRGBa.BLACK)
            drawer.image(image, (rt.width - image.width) / 2.0, (rt.height - image.height) / 2.0)
        }
        extend {
            // Query Runway every frame with the composited 512x512 image.
            val result: PoseNetResponse = runwayQuery("http://localhost:8000/query", PoseNetRequest(rt.colorBuffer(0).toData()))
            drawer.image(image, 0.0, 0.0, 512.0, 512.0)
            // One entry per detected person; each pose is a list of [x, y]
            // keypoints which are scaled back up to screen space here.
            // (fix: the inner lambda parameter previously shadowed `poses`,
            //  and the unused `scores` local has been dropped.)
            result.poses.forEach { pose ->
                pose.forEach { keypoint ->
                    drawer.circle(keypoint[0] * 512.0, keypoint[1] * 512.0, 10.0)
                }
            }
        }
    }
}

View File

@@ -65,4 +65,14 @@ class DeOldifyResponse(val image: String)
// -- DenseCap
class DenseCapRequest(val image: String, @SerializedName("max_detections") val maxDetections: Int = 10)
class DenseCapResponse(val bboxes: List<List<Double>>, val classes: List<String>, val scores: List<Double>)
class DenseCapResponse(val bboxes: List<List<Double>>, val classes: List<String>, val scores: List<Double>)
// -- PoseNet
// Request payload for a Runway-hosted PoseNet model, JSON-serialized via Gson.
// `image` carries the encoded input image; the remaining fields mirror Runway's
// PoseNet options, with @SerializedName pinning the exact JSON keys.
// NOTE(review): default values assumed to match the Runway model's defaults — confirm in the model UI.
class PoseNetRequest(
val image: String,
@SerializedName("estimationType") val estimationType: String = "Multi Pose",
@SerializedName("maxPoseDetections") val maxPoseDetections: Int = 5,
@SerializedName("scoreThreshold") val scoreThreshold: Double = 0.25
)
// `poses`: per-person lists of [x, y] keypoints (presumably normalized 0..1 —
// the demo scales them by 512; verify against the model output).
// `scores`: one confidence value per detected pose.
class PoseNetResponse(val poses: List<List<List<Double>>>, val scores: List<Double>)