[orx-runway] Remove orx-runway

Edwin Jakobs
2025-01-20 12:05:37 +01:00
parent 38b7f52d46
commit 8e75b5628e
23 changed files with 0 additions and 953 deletions

View File

@@ -1,6 +0,0 @@
# orx-runway
Interfaces with the RunwayML machine learning platform, which provides models for
motion capture, image synthesis, object recognition, style transfer and more.
More info at [runwayml.com](https://runwayml.com/).
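
Before removal, a minimal query looked like this (a sketch, assuming a model hosted by Runway on localhost:8000; `Im2txtRequest`, `Im2txtResult`, `runwayQuery` and `toData` come from the sources deleted below):

```kotlin
import org.openrndr.application
import org.openrndr.draw.loadImage
import org.openrndr.extra.runway.Im2txtRequest
import org.openrndr.extra.runway.Im2txtResult
import org.openrndr.extra.runway.runwayQuery

fun main() = application {
    program {
        // encode the image as a base64 data URL and POST it to the hosted model
        val image = loadImage("demo-data/images/pm5544.png")
        val result: Im2txtResult =
            runwayQuery("http://localhost:8000/query", Im2txtRequest(image.toData()))
        println(result.caption)
    }
}
```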

View File

@@ -1,11 +0,0 @@
plugins {
org.openrndr.extra.convention.`kotlin-jvm`
}
dependencies {
implementation(libs.openrndr.application)
implementation(libs.openrndr.math)
implementation(libs.gson)
demoImplementation(libs.openrndr.ffmpeg)
demoImplementation(project(":orx-fx"))
}

View File

@@ -1,26 +0,0 @@
//import org.openrndr.application
//import org.openrndr.draw.*
//import org.openrndr.extra.runway.*
//
///**
// * This example requires a `runway/BASNet` model to be active in Runway.
// */
//fun main() = application {
// configure {
// width = 331
// height = 400
// }
//
// program {
// val image = loadImage("demo-data/images/life-cover.jpg")
//
// val result: BASNETResult =
// runwayQuery("http://localhost:8000/query", BASNETRequest(image.toData()))
//
// val segmentImage = ColorBuffer.fromData(result.image)
//
// extend {
// drawer.image(segmentImage, 0.0, 0.0)
// }
// }
//}

View File

@@ -1,46 +0,0 @@
//import org.openrndr.application
//import org.openrndr.color.ColorRGBa
//import org.openrndr.draw.ColorBuffer
//import org.openrndr.draw.isolatedWithTarget
//import org.openrndr.draw.loadFont
//import org.openrndr.draw.renderTarget
//import org.openrndr.extra.runway.*
//import org.openrndr.math.smoothstep
//
///**
// * This demonstrates an image reinterpretation effect.
// * This example requires a `runway/BigBiGAN` model to be active in Runway.
// */
//
//fun main() = application {
// configure {
// width = 512
// height = 256
// }
//
// program {
// val rt = renderTarget(256, 256) {
// colorBuffer()
// }
// val font = loadFont("demo-data/fonts/IBMPlexMono-Regular.ttf", 256.0)
// val alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
// extend {
// drawer.isolatedWithTarget(rt) {
// drawer.clear(ColorRGBa.BLACK)
// drawer.ortho(rt)
//            val index = seconds.toInt()
//            val t = seconds - index
//            drawer.fill = ColorRGBa.PINK.shade(smoothstep(0.0, 0.2, t) * smoothstep(1.0, 0.8, t))
//            drawer.fontMap = font
//            // wrap the index so it stays inside the alphabet
//            drawer.text("" + alphabet[index % alphabet.length], 64.0, 128.0 + 64.0)
// }
// val result: BigBiGANResult =
// runwayQuery("http://localhost:8000/query", BigBiGANQuery(rt.colorBuffer(0).toData()))
//
// val image = ColorBuffer.fromData(result.outputImage)
// drawer.image(rt.colorBuffer(0))
// drawer.image(image, 256.0, 0.0)
// image.destroy()
// }
// }
//}

View File

@@ -1,49 +0,0 @@
//import org.openrndr.application
//import org.openrndr.color.ColorRGBa
//import org.openrndr.draw.*
//import org.openrndr.extra.runway.*
//
///**
// * This demonstrates an image feedback effect. It starts from a single image.
// * The BigBiGAN generates a new image from the input, this program feeds the
// * generated image back into the model (with an additional distortion).
// * This example requires a `runway/BigBiGAN` model to be active in Runway.
// */
//fun main() = application {
// configure {
// width = 512
// height = 512
// }
//
// program {
// val rt = renderTarget(256, 256) {
// colorBuffer()
// }
// val startImage = loadImage("demo-data/images/portrait.jpg")
//
// drawer.isolatedWithTarget(rt) {
// drawer.ortho(rt)
// drawer.clear(ColorRGBa.BLACK)
// drawer.image(startImage, (rt.width - startImage.width) / 2.0, (rt.height - startImage.height) / 2.0)
// }
//
// extend {
// val result: BigBiGANResult =
// runwayQuery("http://localhost:8000/query", BigBiGANQuery(rt.colorBuffer(0).toData()))
//
// val image = ColorBuffer.fromData(result.outputImage)
// drawer.image(image, 0.0, 0.0, 512.0, 512.0)
//
// drawer.isolatedWithTarget(rt) {
// drawer.ortho(rt)
// drawer.translate(image.width / 2.0, image.height / 2.0)
// drawer.rotate(10.0)
// drawer.translate(-image.width / 2.0, -image.height / 2.0)
// drawer.drawStyle.colorMatrix = tint(ColorRGBa.WHITE.opacify(0.5))
// drawer.image(image)
// }
// image.destroy()
// }
// }
//}
//

View File

@@ -1,36 +0,0 @@
//import org.openrndr.application
//import org.openrndr.color.ColorRGBa
//import org.openrndr.draw.ColorBuffer
//import org.openrndr.draw.loadFont
//import org.openrndr.extra.runway.AttnGANRequest
//import org.openrndr.extra.runway.AttnGANResult
//import org.openrndr.extra.runway.runwayQuery
//import java.io.File
//
///**
// * This demonstrates a text-to-image network. It generates images from single words.
// * This example requires a `runway/AttnGAN` model to be active in Runway.
// */
//
//fun main() = application {
// configure {
// width = 768
// height = 576
// }
//
// program {
// val runwayHost = "http://localhost:8000/query"
// val words = File("demo-data/words/words.txt").readText().split("\n")
// val font = loadFont("demo-data/fonts/IBMPlexMono-Regular.ttf", 72.0)
// extend {
// val text = words.random()
// val result: AttnGANResult = runwayQuery(runwayHost, AttnGANRequest(text))
// val image = ColorBuffer.fromUrl(result.result)
// drawer.fontMap = font
// drawer.image(image, (width - image.width) / 2.0, (height - image.height) / 2.0)
// drawer.fill = ColorRGBa.PINK
// drawer.text(text, 40.0, height / 2.0)
// image.destroy()
// }
// }
//}

View File

@@ -1,39 +0,0 @@
//import org.openrndr.application
//import org.openrndr.color.ColorRGBa
//import org.openrndr.draw.ColorBuffer
//import org.openrndr.draw.loadFont
//import org.openrndr.extra.runway.AttnGANRequest
//import org.openrndr.extra.runway.AttnGANResult
//import org.openrndr.extra.runway.runwayQuery
//import java.io.File
//
///**
// * This demonstrates a text-to-image network. It generates images from simple sentences.
// * This example requires a `runway/AttnGAN` model to be active in Runway.
// */
//
//fun main() = application {
// configure {
// width = 768
// height = 576
// }
//
// program {
// val runwayHost = "http://localhost:8000/query"
// val nouns = File("demo-data/words/nouns.txt").readText().split("\n")
// val prepositions = File("demo-data/words/prepositions.txt").readText().split("\n")
// val adjectives = File("demo-data/words/adjectives.txt").readText().split("\n")
// val font = loadFont("demo-data/fonts/IBMPlexMono-Regular.ttf", 36.0)
//
// extend {
// val text = "a ${adjectives.random()} ${nouns.random()} ${prepositions.random()} a ${adjectives.random()} ${nouns.random()}"
// val result: AttnGANResult = runwayQuery(runwayHost, AttnGANRequest(text))
// val image = ColorBuffer.fromUrl(result.result)
// drawer.fontMap = font
// drawer.image(image, (width - image.width) / 2.0, (height - image.height) / 2.0)
// drawer.fill = ColorRGBa.PINK
// drawer.text(text, 40.0, height - 40.0)
// image.destroy()
// }
// }
//}

View File

@@ -1,40 +0,0 @@
//import org.openrndr.application
//import org.openrndr.color.ColorRGBa
//import org.openrndr.draw.ColorBuffer
//import org.openrndr.draw.isolatedWithTarget
//import org.openrndr.draw.loadImage
//import org.openrndr.draw.renderTarget
//import org.openrndr.extra.runway.*
//
///**
// * This demonstrates the body estimation model of DensePose.
// * This example requires a `runway/DensePose` model to be active in Runway.
// */
//fun main() = application {
// configure {
// width = 512
// height = 512
// }
//
// program {
// val rt = renderTarget(512, 512) {
// colorBuffer()
// }
// val startImage = loadImage("demo-data/images/peopleCity01.jpg")
//
// drawer.isolatedWithTarget(rt) {
// drawer.ortho(rt)
// drawer.clear(ColorRGBa.BLACK)
// drawer.image(startImage, (rt.width - startImage.width) / 2.0, (rt.height - startImage.height) / 2.0)
// }
//
// extend {
// val result: DensePoseResult =
// runwayQuery("http://localhost:8000/query", DensePoseQuery(rt.colorBuffer(0).toData()))
// val image = ColorBuffer.fromData(result.output)
//
// drawer.image(image, 0.0, 0.0, 512.0, 512.0)
// image.destroy()
// }
// }
//}

View File

@@ -1,41 +0,0 @@
//import org.openrndr.application
//import org.openrndr.draw.ColorBuffer
//import org.openrndr.draw.colorBuffer
//import org.openrndr.extra.fx.transform.FlipVertically
//import org.openrndr.extra.runway.DeOldifyRequest
//import org.openrndr.extra.runway.DeOldifyResponse
//import org.openrndr.extra.runway.runwayQuery
//import org.openrndr.ffmpeg.VideoPlayerFFMPEG
//
///**
// * This demonstrates the DeOldify model, which colorizes grayscale images.
// * This example requires a `reiinakano/DeOldify` model to be active in Runway.
// */
//
//fun main() = application {
// configure {
// width = 1280
// height = 360
// }
//
// program {
// val image = colorBuffer(640, 360)
// val camera = VideoPlayerFFMPEG.fromDevice(imageWidth = 640, imageHeight = 360)
// camera.play()
// val flip = FlipVertically()
//
// camera.newFrame.listen {
// flip.apply(it.frame, image)
// }
//
// extend {
// camera.draw(drawer)
// val response: DeOldifyResponse =
// runwayQuery("http://localhost:8000/query", DeOldifyRequest(image.toDataUrl()))
//
//            val colorized = ColorBuffer.fromUrl(response.image)
//            drawer.image(colorized, 640.0, 0.0)
//            colorized.destroy()
// }
// }
//}

View File

@@ -1,50 +0,0 @@
//import org.openrndr.application
//import org.openrndr.draw.ColorBuffer
//import org.openrndr.draw.colorBuffer
//import org.openrndr.extra.fx.transform.FlipVertically
//import org.openrndr.extra.runway.DeOldifyRequest
//import org.openrndr.extra.runway.DeOldifyResponse
//import org.openrndr.extra.runway.runwayQuery
//import org.openrndr.ffmpeg.ScreenRecorder
//import org.openrndr.ffmpeg.VideoPlayerConfiguration
//import org.openrndr.ffmpeg.VideoPlayerFFMPEG
//
///**
// * This demonstrates the DeOldify model, which colorizes grayscale images.
// * This example requires a `reiinakano/DeOldify` model to be active in Runway.
// */
//
//fun main() = application {
// configure {
// width = 1280
// height = 480
// }
//
// program {
// val image = colorBuffer(640, 480)
// val vc = VideoPlayerConfiguration().apply {
// allowFrameSkipping = false
// }
// // -- you will have to supply your own video here
// val video = VideoPlayerFFMPEG.fromFile("data/videos/night_of_the_living_dead_512kb.mp4", configuration = vc)
// video.play()
// val flip = FlipVertically()
//
// video.newFrame.listen {
// flip.apply(it.frame, image)
// }
// extend(ScreenRecorder()) {
// frameRate = 30
// }
// extend {
// video.draw(drawer, 0.0, 0.0, 640.0, 480.0)
// val response: DeOldifyResponse =
// runwayQuery("http://localhost:8000/query", DeOldifyRequest(image.toDataUrl()))
//
//            val colorized = ColorBuffer.fromUrl(response.image)
//
//            drawer.image(colorized, 640.0, 0.0)
//            colorized.destroy()
// }
// }
//}

View File

@@ -1,50 +0,0 @@
import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.colorBuffer
import org.openrndr.draw.loadFont
import org.openrndr.extra.fx.transform.FlipVertically
import org.openrndr.extra.runway.FaceLandmarksRequest
import org.openrndr.extra.runway.FaceLandmarksResponse
import org.openrndr.extra.runway.runwayQuery
import org.openrndr.ffmpeg.VideoPlayerFFMPEG
import org.openrndr.math.Vector2

/**
 * This demonstrates face landmark detection on a live camera image.
 * This example requires a face landmarks model to be active in Runway.
 */
fun main() = application {
    configure {
        width = 768
        height = 576
    }
    program {
        val image = colorBuffer(640, 360)
        val camera = VideoPlayerFFMPEG.fromDevice(imageWidth = 640, imageHeight = 360)
        camera.play()
        val flip = FlipVertically()
        val font = loadFont("demo-data/fonts/IBMPlexMono-Regular.ttf", 12.0)
        // copy each camera frame into `image`, flipped vertically
        camera.newFrame.listen {
            flip.apply(it.frame, image)
        }
        extend {
            camera.draw(drawer)
            drawer.fontMap = font
            try {
                val response: FaceLandmarksResponse =
                    runwayQuery("http://localhost:8000/query", FaceLandmarksRequest(image.toDataUrl()))
                // landmark coordinates are normalized; scale them to the camera image size
                (response.labels zip response.points).forEach { (label, point) ->
                    val position = Vector2(point[0] * image.width, point[1] * image.height)
                    drawer.fill = ColorRGBa.PINK
                    drawer.circle(position, 10.0)
                    drawer.text(label, position)
                }
            } catch (e: IllegalStateException) {
                e.printStackTrace()
            }
        }
    }
}

View File

@@ -1,53 +0,0 @@
//import org.openrndr.application
//import org.openrndr.color.ColorRGBa
//import org.openrndr.draw.ColorBuffer
//import org.openrndr.draw.loadFont
//import org.openrndr.extra.runway.*
//import org.openrndr.ffmpeg.ScreenRecorder
//import java.io.File
//
///**
// * This demonstrates a feedback loop between a text-to-image model and an image-captioning model.
// * This example requires:
// * a `runway/im2txt` model active in Runway on port 8000
// * a `runway/AttnGAN` model active in Runway on port 8001
// */
//
//fun main() = application {
// configure {
// width = 768
// height = 576
// }
//
// program {
//
// val attnHost = "http://localhost:8001/query"
// val im2txtHost = "http://localhost:8000/query"
//
// val nouns = File("demo-data/words/nouns.txt").readText().split("\n")
// val prepositions = File("demo-data/words/prepositions.txt").readText().split("\n")
// val adjectives = File("demo-data/words/adjectives.txt").readText().split("\n")
//
// val font = loadFont("demo-data/fonts/IBMPlexMono-Regular.ttf", 36.0)
//
// var text = "a ${adjectives.random()} ${nouns.random()} ${prepositions.random()} a ${adjectives.random()} ${nouns.random()}"
//
// extend(ScreenRecorder()) {
// frameRate = 1
// }
//
// extend {
// val result: AttnGANResult = runwayQuery(attnHost, AttnGANRequest(text))
// val image = ColorBuffer.fromUrl(result.result)
// drawer.fontMap = font
// drawer.image(image, (width - image.width)/2.0, (height - image.height)/2.0)
//
// val result2:Im2txtResult = runwayQuery(im2txtHost, Im2txtRequest(image.toDataUrl()))
// text = result2.caption
//
// drawer.fill = ColorRGBa.PINK
// drawer.text(text, 40.0, height - 40.0)
// image.destroy()
// }
// }
//}

View File

@@ -1,54 +0,0 @@
import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.colorBuffer
import org.openrndr.draw.loadFont
import org.openrndr.extra.fx.transform.FlipVertically
import org.openrndr.extra.runway.Im2txtRequest
import org.openrndr.extra.runway.Im2txtResult
import org.openrndr.extra.runway.runwayQuery
import org.openrndr.ffmpeg.ScreenRecorder
import org.openrndr.ffmpeg.VideoPlayerFFMPEG

/**
 * This demonstrates an image-to-text network. It generates caption texts from a camera image.
 * This example requires a `runway/im2txt` model to be active in Runway.
 */
fun main() = application {
    configure {
        width = 768
        height = 576
    }
    program {
        val image = colorBuffer(640, 360)
        val camera = VideoPlayerFFMPEG.fromDevice(imageWidth = 640, imageHeight = 360)
        camera.play()
        val flip = FlipVertically()
        val font = loadFont("demo-data/fonts/IBMPlexMono-Regular.ttf", 24.0)
        // copy each camera frame into `image`, flipped vertically
        camera.newFrame.listen {
            flip.apply(it.frame, image)
        }
        extend(ScreenRecorder()) {
            frameRate = 1
        }
        extend {
            camera.draw(drawer)
            drawer.fontMap = font
            try {
                val response: Im2txtResult =
                    runwayQuery("http://localhost:8000/query", Im2txtRequest(image.toDataUrl()))
                drawer.fill = ColorRGBa.PINK
                drawer.text(response.caption, 40.0, height - 40.0)
            } catch (e: IllegalStateException) {
                e.printStackTrace()
            }
        }
    }
}

View File

@@ -1,38 +0,0 @@
import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.loadFont
import org.openrndr.draw.loadImage
import org.openrndr.extra.runway.Im2txtRequest
import org.openrndr.extra.runway.Im2txtResult
import org.openrndr.extra.runway.runwayQuery

/**
 * This demonstrates an image-to-text network. It generates caption texts from a single file.
 * This example requires a `runway/im2txt` model to be active in Runway.
 */
fun main() = application {
    configure {
        width = 768
        height = 576
    }
    program {
        val image = loadImage("demo-data/images/pm5544.png")
        val font = loadFont("demo-data/fonts/IBMPlexMono-Regular.ttf", 24.0)
        extend {
            val response: Im2txtResult =
                runwayQuery("http://localhost:8000/query", Im2txtRequest(image.toDataUrl()))
            drawer.image(image)
            drawer.fontMap = font
            drawer.fill = ColorRGBa.PINK
            drawer.text(response.caption, 40.0, height - 40.0)
            // throttle the queries; the caption of a static image does not change
            Thread.sleep(4000)
        }
    }
}

View File

@@ -1,66 +0,0 @@
//import org.openrndr.application
//import org.openrndr.color.ColorRGBa
//import org.openrndr.draw.Writer
//import org.openrndr.draw.colorBuffer
//import org.openrndr.draw.loadFont
//import org.openrndr.extra.fx.transform.FlipVertically
//import org.openrndr.extra.runway.DenseCapRequest
//import org.openrndr.extra.runway.DenseCapResponse
//import org.openrndr.extra.runway.runwayQuery
//import org.openrndr.ffmpeg.ScreenRecorder
//import org.openrndr.ffmpeg.VideoPlayerFFMPEG
//import org.openrndr.shape.Rectangle
//
//
///**
// * This demonstrates an object-to-text network. It generates caption texts from objects detected in
// * the camera image.
// * This example requires a `runway/DenseCap` model to be active in Runway.
// */
//
//fun main() = application {
// configure {
// width = 768
// height = 576
// }
//
// program {
// val image = colorBuffer(640, 360)
// val camera = VideoPlayerFFMPEG.fromDevice(imageWidth = 640, imageHeight = 360)
// camera.play()
// val flip = FlipVertically()
//
// val font = loadFont("demo-data/fonts/IBMPlexMono-Regular.ttf", 24.0)
// camera.newFrame.listen {
// flip.apply(it.frame, image)
// }
// extend(ScreenRecorder()) {
// frameRate = 1
// }
// extend {
// camera.draw(drawer)
// drawer.fontMap = font
// val response: DenseCapResponse =
// runwayQuery("http://localhost:8000/query", DenseCapRequest(image.toDataUrl(), maxDetections = 1))
//
// drawer.fontMap = font
// drawer.image(image)
//
// for (i in response.bboxes.indices) {
// val width = (response.bboxes[i][2] - response.bboxes[i][0]) * image.width
// val height = (response.bboxes[i][3] - response.bboxes[i][1]) * image.height
// val x = response.bboxes[i][0] * image.width
// val y = response.bboxes[i][1] * image.height
// drawer.fill = null
// drawer.stroke = ColorRGBa.PINK
// drawer.rectangle(x, y, width, height)
// drawer.stroke = null
// drawer.fill = ColorRGBa.PINK
// val w = Writer(drawer)
// w.box = Rectangle(x + 10.0, y + 10.0, width - 20.0, height - 20.0)
// w.newLine()
// w.text(response.classes[i])
// }
// }
// }
//}

View File

@@ -1,26 +0,0 @@
//import org.openrndr.application
//import org.openrndr.draw.*
//import org.openrndr.extra.runway.*
//
///**
// * This example requires a `runway/Person-Segmentation` model to be active in Runway.
// */
//fun main() = application {
// configure {
// width = 331
// height = 400
// }
//
// program {
// val image = loadImage("demo-data/images/life-cover.jpg")
//
// val result: PersonSegmentationResult =
// runwayQuery("http://localhost:8000/query", PersonSegmentationRequest(image.toData(), 0.2))
//
// val segmentImage = ColorBuffer.fromData(result.image)
//
// extend {
// drawer.image(segmentImage, 0.0, 0.0)
// }
// }
//}

View File

@@ -1,36 +0,0 @@
//import org.openrndr.application
//import org.openrndr.draw.ColorBuffer
//import org.openrndr.extra.runway.StyleGANRequest
//import org.openrndr.extra.runway.StyleGANResponse
//import org.openrndr.extra.runway.runwayQuery
///**
// * This demonstrates an image synthesizer.
// * StyleGAN accepts a 512 dimensional vector from which it generates images.
// * This example requires a `runway/StyleGAN` model to be active in Runway.
// * This also works with `eryksalvaggio/Ascinte_Seated`
// */
//fun main() = application {
// configure {
// width = 512
// height = 512
// }
//
// program {
// val latentSpaceVector = MutableList(512) { Math.random() }
//
// extend {
// val result: StyleGANResponse =
// runwayQuery("http://localhost:8000/query", StyleGANRequest(latentSpaceVector, 0.2))
//
// val image = ColorBuffer.fromUrl(result.image)
//
// drawer.image(image, 0.0, 0.0, 512.0, 512.0)
//
// for (i in latentSpaceVector.indices) {
// latentSpaceVector[i] += (Math.random() - 0.5) * 0.1
// }
//
// image.destroy()
// }
// }
//}

View File

@@ -1,55 +0,0 @@
//import org.openrndr.application
//import org.openrndr.color.ColorRGBa
//import org.openrndr.draw.Writer
//import org.openrndr.draw.loadFont
//import org.openrndr.extra.runway.Gpt2Request
//import org.openrndr.extra.runway.Gpt2Result
//import org.openrndr.extra.runway.runwayQuery
//import org.openrndr.shape.Rectangle
//
//import java.io.File
//
///**
// * This demonstrates a prompt-to-text model. It generates a longer text sequence from a prompt.
// * This example requires a `runway/GPT-2` model to be active in Runway.
// */
//
//fun main() = application {
// configure {
// width = 768
// height = 768
// }
//
// program {
//
// val runwayHost = "http://localhost:8000/query"
//
// val nouns = File("demo-data/words/nouns.txt").readText().split("\n")
// val prepositions = File("demo-data/words/prepositions.txt").readText().split("\n")
// val adjectives = File("demo-data/words/adjectives.txt").readText().split("\n")
//
// val font = loadFont("demo-data/fonts/IBMPlexMono-Regular.ttf", 36.0)
//
// val promptFont = loadFont("demo-data/fonts/IBMPlexMono-Regular.ttf", 24.0)
//
// extend {
// val prompt = "a ${adjectives.random()} ${nouns.random()} ${prepositions.random()} a ${adjectives.random()} ${nouns.random()}"
// drawer.fontMap = promptFont
// val wp = Writer(drawer)
// wp.box = Rectangle(20.0, 0.0, width - 40.0, height - 40.0)
// wp.newLine()
// wp.text(prompt)
//
//
// val result: Gpt2Result = runwayQuery(runwayHost, Gpt2Request(prompt, sequenceLength = 128))
//
// drawer.fill = ColorRGBa.PINK
// drawer.fontMap = font
//
// val w = Writer(drawer)
// w.box = Rectangle(20.0, 60.0, width - 40.0, height - 80.0)
// w.newLine()
// w.text(result.text)
// }
// }
//}

View File

@@ -1,26 +0,0 @@
//import org.openrndr.application
//import org.openrndr.draw.*
//import org.openrndr.extra.runway.*
//
///**
// * This example requires a `runway/U-2-Net` model to be active in Runway.
// */
//fun main() = application {
// configure {
// width = 305
// height = 400
// }
//
// program {
// val image = loadImage("demo-data/images/vw-beetle.jpg")
//
// val result: U2NetResult =
// runwayQuery("http://localhost:8000/query", U2NetRequest(image.toData()))
//
// val segmentImage = ColorBuffer.fromData(result.image)
//
// extend {
// drawer.image(segmentImage, 0.0, 0.0)
// }
// }
//}

View File

@@ -1,44 +0,0 @@
import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.isolatedWithTarget
import org.openrndr.draw.loadImage
import org.openrndr.draw.renderTarget
import org.openrndr.extra.runway.*

/**
 * This demonstrates the body estimation model of PoseNet.
 * This example requires a `runway/PoseNet` model to be active in Runway.
 */
fun main() = application {
    configure {
        width = 512
        height = 512
    }
    program {
        val rt = renderTarget(512, 512) {
            colorBuffer()
        }
        val image = loadImage("demo-data/images/peopleCity01.jpg")
        drawer.isolatedWithTarget(rt) {
            drawer.ortho(rt)
            drawer.clear(ColorRGBa.BLACK)
            drawer.image(image, (rt.width - image.width) / 2.0, (rt.height - image.height) / 2.0)
        }
        extend {
            val result: PoseNetResponse =
                runwayQuery("http://localhost:8000/query", PoseNetRequest(rt.colorBuffer(0).toData()))
            drawer.image(image, 0.0, 0.0, 512.0, 512.0)
            // each pose is a list of keypoints with normalized coordinates
            result.poses.forEach { pose ->
                pose.forEach { keypoint ->
                    drawer.circle(keypoint[0] * 512.0, keypoint[1] * 512.0, 10.0)
                }
            }
        }
    }
}

View File

@@ -1,98 +0,0 @@
package org.openrndr.extra.runway

import com.google.gson.annotations.SerializedName

// -- AttnGAN
class AttnGANRequest(val caption: String)
class AttnGANResult(val result: String)

// -- BASNet
class BASNETRequest(val image: String)
class BASNETResult(val image: String)

// -- BDCN
class BdcnRequest(@SerializedName("input_image") val inputImage: String)
class BdcnResult(@SerializedName("output_image") val outputImage: String)

// -- BigBiGAN
class BigBiGANQuery(@SerializedName("input_image") val inputImage: String)
class BigBiGANResult(@SerializedName("output_image") val outputImage: String)

// -- DensePose
class DensePoseQuery(@SerializedName("input") val input: String)
class DensePoseResult(@SerializedName("output") val output: String)

// -- SPADE-COCO
class SpadeCocoRequest(@SerializedName("semantic_map") val semanticMap: String)
class SpadeCocoResult(val output: String)

// -- GPT-2
class Gpt2Request(val prompt: String, val seed: Int = 0, @SerializedName("sequence_length") val sequenceLength: Int = 128)
class Gpt2Result(val text: String)

// -- im2txt
class Im2txtRequest(val image: String)
class Im2txtResult(val caption: String)

// -- PSENet
class PsenetRequest(@SerializedName("input_image") val inputImage: String)
class PsenetResult(val bboxes: Array<Array<Double>>)

// -- Face landmarks
class FaceLandmarksRequest(val photo: String)
class FaceLandmarksResponse(val points: List<List<Double>>, val labels: List<String>)

// -- StyleGAN
/**
 * StyleGAN request
 * @param z a latent vector, a list of 512 doubles
 * @param truncation truncation factor applied to the latent
 */
class StyleGANRequest(val z: List<Double>, val truncation: Double = 1.0)
class StyleGANResponse(val image: String)

// -- DeOldify
class DeOldifyRequest(val image: String, val renderFactor: Int = 20)
class DeOldifyResponse(val image: String)

// -- DenseCap
class DenseCapRequest(val image: String, @SerializedName("max_detections") val maxDetections: Int = 10)
class DenseCapResponse(val bboxes: List<List<Double>>, val classes: List<String>, val scores: List<Double>)

// -- Person-Segmentation
/**
 * Automatically detects people and extracts them from photos
 *
 * @property image the input image encoded as a base64 data URL
 * @property threshold segmentation threshold in [0.0, 1.0]
 */
class PersonSegmentationRequest(val image: String, val threshold: Double)
class PersonSegmentationResult(val image: String)

// -- PoseNet
class PoseNetRequest(
    val image: String,
    @SerializedName("estimationType") val estimationType: String = "Multi Pose",
    @SerializedName("maxPoseDetections") val maxPoseDetections: Int = 5,
    @SerializedName("scoreThreshold") val scoreThreshold: Double = 0.25
)
class PoseNetResponse(val poses: List<List<List<Double>>>, val scores: List<Double>)

// -- U-2-Net
class U2NetRequest(val image: String)
class U2NetResult(val image: String)
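
These classes mirror the JSON bodies of the hosted models one-to-one; `@SerializedName` bridges Kotlin naming to the snake_case keys the models expect. A minimal sketch of what goes over the wire (key order follows field declaration order in practice):

```kotlin
import com.google.gson.Gson
import org.openrndr.extra.runway.Gpt2Request

fun main() {
    val json = Gson().toJson(Gpt2Request("a red fox", sequenceLength = 64))
    // prints {"prompt":"a red fox","seed":0,"sequence_length":64}
    println(json)
}
```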

View File

@@ -1,62 +0,0 @@
package org.openrndr.extra.runway

import com.google.gson.Gson
import org.openrndr.draw.ColorBuffer
import org.openrndr.draw.ImageFileFormat
import java.io.File
import java.net.*
import java.util.*

/**
 * Constructs a base64 data URL from an encoded image
 */
fun ColorBuffer.toData(format: ImageFileFormat = ImageFileFormat.JPG): String {
    val tempFile = File.createTempFile("orx-runway", null)
    saveToFile(tempFile, format, async = false)
    val imageBytes = tempFile.readBytes()
    val base64Data = Base64.getEncoder().encodeToString(imageBytes)
    tempFile.delete()
    // use a MIME type that matches the requested encoding instead of assuming JPEG
    val mimeType = if (format == ImageFileFormat.JPG) "image/jpeg" else "image/png"
    return "data:$mimeType;base64,$base64Data"
}

/**
 * Performs a Runway query
 * @param target url string, e.g. http://localhost:8000/query
 * @param query the request object, serialized to JSON with Gson
 */
inline fun <Q, reified R> runwayQuery(target: String, query: Q): R {
    try {
        val queryJson = Gson().toJson(query)
        val connection = URL(target).openConnection() as HttpURLConnection
        connection.doOutput = true
        connection.connectTimeout = 1_000
        connection.readTimeout = 200_000
        connection.requestMethod = "POST"
        connection.setRequestProperty("Content-Type", "application/json")
        connection.setRequestProperty("Accept", "application/json")
        // use {} closes the streams even when the request fails midway
        connection.outputStream.use {
            it.write(queryJson.toByteArray())
            it.flush()
        }
        val responseJson = connection.inputStream.use { String(it.readBytes()) }
        connection.disconnect()
        return Gson().fromJson(responseJson, R::class.java)
    } catch (e: SocketTimeoutException) {
        error("RunwayML connection timed out '$target'. Check if Runway and the model are running.")
    } catch (e: ConnectException) {
        error("RunwayML connection refused '$target'. Check if Runway and the model are running.")
    } catch (e: UnknownHostException) {
        error("Runway host not found '$target'. Check if Runway and the model are running.")
    }
}
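
All transport failures surface as `IllegalStateException` (Kotlin's `error(...)`), which is why the camera demos above wrap their queries in `try`/`catch` to keep rendering when the model is offline. A sketch of that pattern (`captionOrFallback` is a hypothetical helper, not part of the removed sources):

```kotlin
import org.openrndr.extra.runway.Im2txtRequest
import org.openrndr.extra.runway.Im2txtResult
import org.openrndr.extra.runway.runwayQuery

// hypothetical helper: returns a caption, or a fallback when Runway is unreachable
fun captionOrFallback(dataUrl: String): String = try {
    val result: Im2txtResult =
        runwayQuery("http://localhost:8000/query", Im2txtRequest(dataUrl))
    result.caption
} catch (e: IllegalStateException) {
    "(no caption: Runway model unreachable)"
}
```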