Add demos for orx-runway

Edwin Jakobs
2020-04-07 16:24:29 +02:00
parent 9200519672
commit eeed2962dd
19 changed files with 681 additions and 8 deletions

View File

@@ -20,8 +20,9 @@ project.ext {
     spekVersion = "2.0.10"
     libfreenectVersion = "0.5.7-1.5.2"
     gsonVersion = "2.8.6"
+    antlrVersion = "4.8-1"
 }
 
 switch (org.gradle.internal.os.OperatingSystem.current()) {
     case org.gradle.internal.os.OperatingSystem.WINDOWS:
         project.ext.openrndrOS = "windows"
@@ -33,6 +34,7 @@ switch (org.gradle.internal.os.OperatingSystem.current()) {
         project.ext.openrndrOS = "macos"
         break
 }
+
 dokka {
     moduleName = "$rootProject.name"
     outputDirectory = "$buildDir/docs"

View File

@@ -1,3 +1,21 @@
+sourceSets {
+    demo {
+        java {
+            srcDirs = ["src/demo/kotlin"]
+            compileClasspath += main.getCompileClasspath()
+            runtimeClasspath += main.getRuntimeClasspath()
+        }
+    }
+}
+
 dependencies {
     implementation "com.google.code.gson:gson:$gsonVersion"
+    demoImplementation("org.openrndr:openrndr-core:$openrndrVersion")
+    demoRuntimeOnly("org.openrndr:openrndr-gl3:$openrndrVersion")
+    demoRuntimeOnly("org.openrndr:openrndr-gl3-natives-$openrndrOS:$openrndrVersion")
+    demoRuntimeOnly("org.openrndr:openrndr-extensions:$openrndrVersion")
+    demoImplementation("org.openrndr:openrndr-ffmpeg:$openrndrVersion")
+    demoRuntimeOnly("org.openrndr:openrndr-ffmpeg-natives-$openrndrOS:$openrndrVersion")
+    demoImplementation(project(":orx-fx"))
+    demoImplementation(sourceSets.getByName("main").output)
 }
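
The demo source set above gives the module a demo classpath that extends main; the commit itself does not wire it to a run task. A minimal sketch of a convenience task built on that classpath (the task name, property name, and demo class are assumptions, not part of this commit):

    // hypothetical helper: run a demo main class from the demo source set,
    // e.g. ./gradlew :orx-runway:runDemo -PdemoClass=SomeDemoKt
    task runDemo(type: JavaExec) {
        classpath = sourceSets.demo.runtimeClasspath
        main = project.findProperty("demoClass")
    }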

View File

@@ -0,0 +1,44 @@
import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.*
import org.openrndr.extra.runway.*
import org.openrndr.math.smoothstep
import org.openrndr.resourceUrl

/**
 * This demonstrates an image reinterpretation effect.
 * This example requires a `runway/BigBiGAN` model to be active in Runway.
 */
fun main() = application {
    configure {
        width = 512
        height = 256
    }

    program {
        val rt = renderTarget(256, 256) {
            colorBuffer()
        }
        val font = loadFont(resourceUrl("/data/fonts/IBMPlexMono-Regular.ttf"), 256.0)
        val alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"

        extend {
            drawer.isolatedWithTarget(rt) {
                drawer.background(ColorRGBa.BLACK)
                drawer.ortho(rt)
                // fade each glyph in and out over one second
                val t = seconds - seconds.toInt()
                drawer.fill = ColorRGBa.PINK.shade(smoothstep(0.0, 0.2, t) * smoothstep(1.0, 0.8, t))
                drawer.fontMap = font
                // wrap the index so the demo keeps running past the end of the alphabet
                drawer.text(alphabet[seconds.toInt() % alphabet.length].toString(), 64.0, 128.0 + 64.0)
            }
            val result: BigBiGANResult =
                    runwayQuery("http://localhost:8000/query", BigBiGANQuery(rt.colorBuffer(0).toData()))
            val image = ColorBuffer.fromData(result.outputImage)
            drawer.image(rt.colorBuffer(0))
            drawer.image(image, 256.0, 0.0)
            image.destroy()
        }
    }
}

View File

@@ -0,0 +1,50 @@
import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.*
import org.openrndr.extra.runway.*
import org.openrndr.resourceUrl

/**
 * This demonstrates an image feedback effect. It starts from a single image.
 * BigBiGAN generates a new image from the input; this program feeds the
 * generated image back into the model (with an additional distortion).
 * This example requires a `runway/BigBiGAN` model to be active in Runway.
 */
fun main() = application {
    configure {
        width = 512
        height = 512
    }

    program {
        val rt = renderTarget(256, 256) {
            colorBuffer()
        }
        val startImage = loadImage(resourceUrl("/data/images/portrait.jpg"))
        drawer.isolatedWithTarget(rt) {
            drawer.ortho(rt)
            drawer.background(ColorRGBa.BLACK)
            drawer.image(startImage, (rt.width - startImage.width) / 2.0, (rt.height - startImage.height) / 2.0)
        }

        extend {
            val result: BigBiGANResult =
                    runwayQuery("http://localhost:8000/query", BigBiGANQuery(rt.colorBuffer(0).toData()))
            val image = ColorBuffer.fromData(result.outputImage)
            drawer.image(image, 0.0, 0.0, 512.0, 512.0)

            // feed the generated image back into the render target, slightly rotated and faded
            drawer.isolatedWithTarget(rt) {
                drawer.ortho(rt)
                drawer.translate(image.width / 2.0, image.height / 2.0)
                drawer.rotate(10.0)
                drawer.translate(-image.width / 2.0, -image.height / 2.0)
                drawer.drawStyle.colorMatrix = tint(ColorRGBa.WHITE.opacify(0.5))
                drawer.image(image)
            }
            image.destroy()
        }
    }
}

View File

@@ -0,0 +1,36 @@
import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.ColorBuffer
import org.openrndr.draw.loadFont
import org.openrndr.extra.runway.*
import org.openrndr.resourceUrl
import java.net.URL

/**
 * This demonstrates a text-to-image network. It generates images from single words.
 * This example requires a `runway/AttnGAN` model to be active in Runway.
 */
fun main() = application {
    configure {
        width = 768
        height = 576
    }

    program {
        val runwayHost = "http://localhost:8000/query"
        val dictionary = URL(resourceUrl("/data/dictionary/words.txt")).readText().split("\n")
        val font = loadFont(resourceUrl("/data/fonts/IBMPlexMono-Regular.ttf"), 72.0)

        extend {
            val text = dictionary.random()
            val result: AttnGANResult = runwayQuery(runwayHost, AttnGANRequest(text))
            val image = ColorBuffer.fromUrl(result.result)
            drawer.fontMap = font
            drawer.image(image, (width - image.width) / 2.0, (height - image.height) / 2.0)
            drawer.fill = ColorRGBa.PINK
            drawer.text(text, 40.0, height / 2.0)
            image.destroy()
        }
    }
}

View File

@@ -0,0 +1,39 @@
import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.ColorBuffer
import org.openrndr.draw.loadFont
import org.openrndr.extra.runway.*
import org.openrndr.resourceUrl
import java.net.URL

/**
 * This demonstrates a text-to-image network. It generates images from simple sentences.
 * This example requires a `runway/AttnGAN` model to be active in Runway.
 */
fun main() = application {
    configure {
        width = 768
        height = 576
    }

    program {
        val runwayHost = "http://localhost:8000/query"
        val nouns = URL(resourceUrl("/data/dictionary/nouns.txt")).readText().split("\n")
        val prepositions = URL(resourceUrl("/data/dictionary/prepositions.txt")).readText().split("\n")
        val adjectives = URL(resourceUrl("/data/dictionary/adjectives.txt")).readText().split("\n")
        val font = loadFont(resourceUrl("/data/fonts/IBMPlexMono-Regular.ttf"), 36.0)

        extend {
            val text = "a ${adjectives.random()} ${nouns.random()} ${prepositions.random()} a ${adjectives.random()} ${nouns.random()}"
            val result: AttnGANResult = runwayQuery(runwayHost, AttnGANRequest(text))
            val image = ColorBuffer.fromUrl(result.result)
            drawer.fontMap = font
            drawer.image(image, (width - image.width) / 2.0, (height - image.height) / 2.0)
            drawer.fill = ColorRGBa.PINK
            drawer.text(text, 40.0, height - 40.0)
            image.destroy()
        }
    }
}

View File

@@ -0,0 +1,38 @@
import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.*
import org.openrndr.extra.runway.*
import org.openrndr.resourceUrl

/**
 * This demonstrates the DensePose body estimation model.
 * This example requires a `runway/DensePose` model active in Runway.
 */
fun main() = application {
    configure {
        width = 512
        height = 512
    }

    program {
        val rt = renderTarget(512, 512) {
            colorBuffer()
        }
        val startImage = loadImage(resourceUrl("/data/images/peopleCity01.jpg"))
        drawer.isolatedWithTarget(rt) {
            drawer.ortho(rt)
            drawer.background(ColorRGBa.BLACK)
            drawer.image(startImage, (rt.width - startImage.width) / 2.0, (rt.height - startImage.height) / 2.0)
        }

        extend {
            val result: DensePoseResult =
                    runwayQuery("http://localhost:8000/query", DensePoseQuery(rt.colorBuffer(0).toData()))
            val image = ColorBuffer.fromData(result.output)
            drawer.image(image, 0.0, 0.0, 512.0, 512.0)
            image.destroy()
        }
    }
}

View File

@@ -0,0 +1,42 @@
import org.openrndr.application
import org.openrndr.draw.ColorBuffer
import org.openrndr.draw.colorBuffer
import org.openrndr.extra.fx.transform.FlipVertically
import org.openrndr.extra.runway.DeOldifyRequest
import org.openrndr.extra.runway.DeOldifyResponse
import org.openrndr.extra.runway.runwayQuery
import org.openrndr.ffmpeg.VideoPlayerFFMPEG

/**
 * This demonstrates the DeOldify model, which colorizes grayscale images.
 * This example requires a `reiinakano/DeOldify` model active in Runway.
 */
fun main() = application {
    configure {
        width = 1280
        height = 360
    }

    program {
        val image = colorBuffer(640, 360)
        val camera = VideoPlayerFFMPEG.fromDevice(imageWidth = 640, imageHeight = 360)
        camera.play()

        val flip = FlipVertically()
        camera.newFrame.listen {
            flip.apply(it.frame, image)
        }

        extend {
            camera.draw(drawer)
            val response: DeOldifyResponse =
                    runwayQuery("http://localhost:8000/query", DeOldifyRequest(image.toDataUrl()))
            // keep the colorized result in its own name to avoid shadowing the camera image
            val colorized = ColorBuffer.fromUrl(response.image)
            drawer.image(colorized, 640.0, 0.0)
            colorized.destroy()
        }
    }
}

View File

@@ -0,0 +1,50 @@
import org.openrndr.application
import org.openrndr.draw.ColorBuffer
import org.openrndr.draw.colorBuffer
import org.openrndr.extra.fx.transform.FlipVertically
import org.openrndr.extra.runway.DeOldifyRequest
import org.openrndr.extra.runway.DeOldifyResponse
import org.openrndr.extra.runway.runwayQuery
import org.openrndr.ffmpeg.ScreenRecorder
import org.openrndr.ffmpeg.VideoPlayerConfiguration
import org.openrndr.ffmpeg.VideoPlayerFFMPEG

/**
 * This demonstrates the DeOldify model, which colorizes grayscale images.
 * This example requires a `reiinakano/DeOldify` model active in Runway.
 */
fun main() = application {
    configure {
        width = 1280
        height = 480
    }

    program {
        val image = colorBuffer(640, 480)
        val vc = VideoPlayerConfiguration().apply {
            allowFrameSkipping = false
        }
        // -- you will have to supply your own video here
        val video = VideoPlayerFFMPEG.fromFile("data/videos/night_of_the_living_dead_512kb.mp4", configuration = vc)
        video.play()

        val flip = FlipVertically()
        video.newFrame.listen {
            flip.apply(it.frame, image)
        }

        extend(ScreenRecorder()) {
            frameRate = 30
        }
        extend {
            video.draw(drawer, 0.0, 0.0, 640.0, 480.0)
            val response: DeOldifyResponse =
                    runwayQuery("http://localhost:8000/query", DeOldifyRequest(image.toDataUrl()))
            // keep the colorized result in its own name to avoid shadowing the video frame
            val colorized = ColorBuffer.fromUrl(response.image)
            drawer.image(colorized, 640.0, 0.0)
            colorized.destroy()
        }
    }
}

View File

@@ -0,0 +1,51 @@
import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.colorBuffer
import org.openrndr.draw.loadFont
import org.openrndr.extra.fx.transform.FlipVertically
import org.openrndr.extra.runway.FaceLandmarksRequest
import org.openrndr.extra.runway.FaceLandmarksResponse
import org.openrndr.extra.runway.runwayQuery
import org.openrndr.ffmpeg.VideoPlayerFFMPEG
import org.openrndr.math.Vector2
import org.openrndr.resourceUrl

/**
 * This demonstrates face landmark detection on a live camera image.
 * This example requires a face landmarks model to be active in Runway.
 */
fun main() = application {
    configure {
        width = 768
        height = 576
    }

    program {
        val image = colorBuffer(640, 360)
        val camera = VideoPlayerFFMPEG.fromDevice(imageWidth = 640, imageHeight = 360)
        camera.play()

        val flip = FlipVertically()
        val font = loadFont(resourceUrl("/data/fonts/IBMPlexMono-Regular.ttf"), 12.0)

        camera.newFrame.listen {
            flip.apply(it.frame, image)
        }

        extend {
            camera.draw(drawer)
            drawer.fontMap = font
            try {
                val response: FaceLandmarksResponse =
                        runwayQuery("http://localhost:8000/query", FaceLandmarksRequest(image.toDataUrl()))
                (response.labels zip response.points).forEach {
                    // landmark coordinates are normalized; scale them to the camera image size
                    val position = Vector2(it.second[0] * image.width, it.second[1] * image.height)
                    drawer.fill = ColorRGBa.PINK
                    drawer.circle(position, 10.0)
                    drawer.text(it.first, position)
                }
            } catch (e: IllegalStateException) {
                e.printStackTrace()
            }
        }
    }
}

View File

@@ -0,0 +1,55 @@
import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.ColorBuffer
import org.openrndr.draw.loadFont
import org.openrndr.extra.runway.*
import org.openrndr.ffmpeg.ScreenRecorder
import org.openrndr.resourceUrl
import java.net.URL

/**
 * This demonstrates a feedback loop between a text-to-image model and an
 * image-captioning model. This example requires:
 *  - a `runway/im2txt` model active in Runway on port 8000
 *  - a `runway/AttnGAN` model active in Runway on port 8001
 */
fun main() = application {
    configure {
        width = 768
        height = 576
    }

    program {
        val attnHost = "http://localhost:8001/query"
        val im2txtHost = "http://localhost:8000/query"

        val nouns = URL(resourceUrl("/data/dictionary/nouns.txt")).readText().split("\n")
        val prepositions = URL(resourceUrl("/data/dictionary/prepositions.txt")).readText().split("\n")
        val adjectives = URL(resourceUrl("/data/dictionary/adjectives.txt")).readText().split("\n")

        val font = loadFont("data/fonts/IBMPlexMono-Regular.ttf", 36.0)
        var text = "a ${adjectives.random()} ${nouns.random()} ${prepositions.random()} a ${adjectives.random()} ${nouns.random()}"

        extend(ScreenRecorder()) {
            frameRate = 1
        }
        extend {
            val result: AttnGANResult = runwayQuery(attnHost, AttnGANRequest(text))
            val image = ColorBuffer.fromUrl(result.result)
            drawer.fontMap = font
            drawer.image(image, (width - image.width) / 2.0, (height - image.height) / 2.0)
            // feed the generated image back into the captioning model
            val result2: Im2txtResult = runwayQuery(im2txtHost, Im2txtRequest(image.toDataUrl()))
            text = result2.caption
            drawer.fill = ColorRGBa.PINK
            drawer.text(text, 40.0, height - 40.0)
            image.destroy()
        }
    }
}

View File

@@ -0,0 +1,53 @@
import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.colorBuffer
import org.openrndr.draw.loadFont
import org.openrndr.extra.fx.transform.FlipVertically
import org.openrndr.extra.runway.*
import org.openrndr.ffmpeg.ScreenRecorder
import org.openrndr.ffmpeg.VideoPlayerFFMPEG

/**
 * This demonstrates an image-to-text network. It generates caption text from a camera image.
 * This example requires a `runway/im2txt` model to be active in Runway.
 */
fun main() = application {
    configure {
        width = 768
        height = 576
    }

    program {
        val image = colorBuffer(640, 360)
        val camera = VideoPlayerFFMPEG.fromDevice(imageWidth = 640, imageHeight = 360)
        camera.play()

        val flip = FlipVertically()
        val font = loadFont("data/fonts/IBMPlexMono-Regular.ttf", 24.0)

        camera.newFrame.listen {
            flip.apply(it.frame, image)
        }

        extend(ScreenRecorder()) {
            frameRate = 1
        }
        extend {
            camera.draw(drawer)
            drawer.fontMap = font
            try {
                val response: Im2txtResult =
                        runwayQuery("http://localhost:8000/query", Im2txtRequest(image.toDataUrl()))
                drawer.fill = ColorRGBa.PINK
                drawer.text(response.caption, 40.0, height - 40.0)
            } catch (e: IllegalStateException) {
                e.printStackTrace()
            }
        }
    }
}

View File

@@ -0,0 +1,38 @@
import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.loadFont
import org.openrndr.draw.loadImage
import org.openrndr.extra.runway.Im2txtRequest
import org.openrndr.extra.runway.Im2txtResult
import org.openrndr.extra.runway.runwayQuery

/**
 * This demonstrates an image-to-text network. It generates caption text from a single image file.
 * This example requires a `runway/im2txt` model to be active in Runway.
 */
fun main() = application {
    configure {
        width = 768
        height = 576
    }

    program {
        val image = loadImage("data/images/pm5544.png")
        val font = loadFont("data/fonts/IBMPlexMono-Regular.ttf", 24.0)

        extend {
            val response: Im2txtResult =
                    runwayQuery("http://localhost:8000/query", Im2txtRequest(image.toDataUrl()))
            drawer.image(image)
            drawer.fontMap = font
            drawer.fill = ColorRGBa.PINK
            drawer.text(response.caption, 40.0, height - 40.0)
            // crude rate limiting: wait four seconds before the next query
            Thread.sleep(4000)
        }
    }
}

View File

@@ -0,0 +1,66 @@
import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.colorBuffer
import org.openrndr.draw.loadFont
import org.openrndr.extra.fx.transform.FlipVertically
import org.openrndr.extra.runway.DenseCapRequest
import org.openrndr.extra.runway.DenseCapResponse
import org.openrndr.extra.runway.runwayQuery
import org.openrndr.ffmpeg.ScreenRecorder
import org.openrndr.ffmpeg.VideoPlayerFFMPEG
import org.openrndr.shape.Rectangle
import org.openrndr.text.Writer

/**
 * This demonstrates an object-to-text network. It generates caption texts for objects detected in
 * the camera image.
 * This example requires a `runway/DenseCap` model to be active in Runway.
 */
fun main() = application {
    configure {
        width = 768
        height = 576
    }

    program {
        val image = colorBuffer(640, 360)
        val camera = VideoPlayerFFMPEG.fromDevice(imageWidth = 640, imageHeight = 360)
        camera.play()

        val flip = FlipVertically()
        val font = loadFont("data/fonts/IBMPlexMono-Regular.ttf", 24.0)

        camera.newFrame.listen {
            flip.apply(it.frame, image)
        }

        extend(ScreenRecorder()) {
            frameRate = 1
        }
        extend {
            camera.draw(drawer)
            drawer.fontMap = font
            val response: DenseCapResponse =
                    runwayQuery("http://localhost:8000/query", DenseCapRequest(image.toDataUrl(), maxDetections = 1))
            drawer.image(image)
            for ((i, bbox) in response.bboxes.withIndex()) {
                // bounding boxes are reported normalized; scale them to the image size
                val x = bbox[0] * image.width
                val y = bbox[1] * image.height
                val boxWidth = (bbox[2] - bbox[0]) * image.width
                val boxHeight = (bbox[3] - bbox[1]) * image.height

                drawer.fill = null
                drawer.stroke = ColorRGBa.PINK
                drawer.rectangle(x, y, boxWidth, boxHeight)

                drawer.stroke = null
                drawer.fill = ColorRGBa.PINK
                val w = Writer(drawer)
                w.box = Rectangle(x + 10.0, y + 10.0, boxWidth - 20.0, boxHeight - 20.0)
                w.newLine()
                w.text(response.classes[i])
            }
        }
    }
}

View File

@@ -0,0 +1,36 @@
import org.openrndr.application
import org.openrndr.draw.ColorBuffer
import org.openrndr.extra.runway.StyleGANRequest
import org.openrndr.extra.runway.StyleGANResponse
import org.openrndr.extra.runway.runwayQuery

/**
 * This demonstrates an image synthesizer.
 * StyleGAN accepts a 512-dimensional vector from which it generates images.
 * This example requires a `runway/StyleGAN` model to be active in Runway.
 * This also works with `eryksalvaggio/Ascinte_Seated`.
 */
fun main() = application {
    configure {
        width = 512
        height = 512
    }

    program {
        val latentSpaceVector = MutableList(512) { Math.random() }

        extend {
            val result: StyleGANResponse =
                    runwayQuery("http://localhost:8000/query", StyleGANRequest(latentSpaceVector, 0.2))
            val image = ColorBuffer.fromUrl(result.image)
            drawer.image(image, 0.0, 0.0, 512.0, 512.0)

            // random walk through the latent space
            for (i in latentSpaceVector.indices) {
                latentSpaceVector[i] += (Math.random() - 0.5) * 0.1
            }
            image.destroy()
        }
    }
}

View File

@@ -0,0 +1,54 @@
import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.loadFont
import org.openrndr.extra.runway.*
import org.openrndr.resourceUrl
import org.openrndr.shape.Rectangle
import org.openrndr.text.Writer
import java.net.URL

/**
 * This demonstrates a prompt-to-text model. It generates a longer text sequence from a prompt.
 * This example requires a `runway/GPT-2` model to be active in Runway.
 */
fun main() = application {
    configure {
        width = 768
        height = 768
    }

    program {
        val runwayHost = "http://localhost:8000/query"

        val nouns = URL(resourceUrl("/data/dictionary/nouns.txt")).readText().split("\n")
        val prepositions = URL(resourceUrl("/data/dictionary/prepositions.txt")).readText().split("\n")
        val adjectives = URL(resourceUrl("/data/dictionary/adjectives.txt")).readText().split("\n")

        val font = loadFont(resourceUrl("/data/fonts/IBMPlexMono-Regular.ttf"), 36.0)
        val promptFont = loadFont(resourceUrl("/data/fonts/IBMPlexMono-Regular.ttf"), 24.0)

        extend {
            val prompt = "a ${adjectives.random()} ${nouns.random()} ${prepositions.random()} a ${adjectives.random()} ${nouns.random()}"
            drawer.fontMap = promptFont
            val wp = Writer(drawer)
            wp.box = Rectangle(20.0, 0.0, width - 40.0, height - 40.0)
            wp.newLine()
            wp.text(prompt)

            val result: Gpt2Result = runwayQuery(runwayHost, Gpt2Request(prompt, sequenceLength = 128))
            drawer.fill = ColorRGBa.PINK
            drawer.fontMap = font
            val w = Writer(drawer)
            w.box = Rectangle(20.0, 60.0, width - 40.0, height - 80.0)
            w.newLine()
            w.text(result.text)
        }
    }
}

View File

@@ -5,10 +5,7 @@ import org.openrndr.draw.ColorBuffer
 import org.openrndr.draw.ImageFileFormat
 import java.io.ByteArrayInputStream
 import java.io.File
-import java.net.HttpURLConnection
-import java.net.SocketTimeoutException
-import java.net.URL
-import java.net.UnknownHostException
+import java.net.*
 import java.util.*
 
 /**
@@ -48,7 +45,7 @@ inline fun <Q, reified R> runwayQuery(target: String, query: Q): R {
     val queryJson = Gson().toJson(query)
     val connection = URL(target).openConnection() as HttpURLConnection
     //with(connection) {
     connection.doOutput = true
     connection.connectTimeout = 1_000
     connection.readTimeout = 200_000
     connection.requestMethod = "POST"
@@ -67,8 +64,11 @@ inline fun <Q, reified R> runwayQuery(target: String, query: Q): R {
         connection.disconnect()
         return Gson().fromJson(responseJson, R::class.java)
     } catch (e: SocketTimeoutException) {
-        error("RunwayML connection timed out. Check if Runway and model are running.")
+        error("RunwayML connection timed out '$target'. Check if Runway and model are running.")
+    } catch (e: ConnectException) {
+        error("RunwayML connection refused '$target'. Check if Runway and model are running.")
     } catch (e: UnknownHostException) {
-        error("Runway host not found. Check if Runway and model are running.")
+        error("Runway host not found '$target'. Check if Runway and model are running.")
     }
 }
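
Since runwayQuery (above) serializes any query object with Gson and reports connection failures through error(), binding a new model only takes a request/response pair like the ones used by the demos. A minimal sketch, assuming a hypothetical model whose JSON fields are named inputText and outputText:

    import org.openrndr.extra.runway.runwayQuery

    // hypothetical request/response pair; the field names are assumptions, not a real Runway model
    data class EchoRequest(val inputText: String)
    data class EchoResponse(val outputText: String)

    fun main() {
        try {
            val response: EchoResponse =
                    runwayQuery("http://localhost:8000/query", EchoRequest("hello"))
            println(response.outputText)
        } catch (e: IllegalStateException) {
            // error() throws IllegalStateException; since this commit the messages
            // include the target URL, e.g. "RunwayML connection refused 'http://localhost:8000/query'. ..."
            println(e.message)
        }
    }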

View File

@@ -15,6 +15,7 @@ include 'orx-camera',
         'orx-gui',
         'orx-image-fit',
         'orx-kdtree',
+        'orx-keyframer',
         'orx-mesh-generators',
         'orx-midi',
         'orx-no-clear',