Put demo assets into /demo-data/ (#104)
Image asset (binary): 54 KiB before and after the move.
@@ -10,7 +10,6 @@ import org.openrndr.color.ColorRGBa
import org.openrndr.color.rgb
import org.openrndr.draw.loadImage
import org.openrndr.extensions.SingleScreenshot
import org.openrndr.resourceUrl
import kotlin.math.cos
import kotlin.math.sin

@@ -4,7 +4,6 @@ import org.openrndr.draw.*
import org.openrndr.extensions.SingleScreenshot
import org.openrndr.extra.fx.blur.*
import org.openrndr.math.Polar
import org.openrndr.resourceUrl
import kotlin.math.sin

fun main() {
@@ -39,8 +38,7 @@ fun main() {
// On this buffer we will draw the dry buffer with an effect applied
val wet = colorBuffer(dry.width, dry.height)

val font = loadFont(resourceUrl("/data/fonts/IBMPlexMono-Regular" +
".ttf"), 16.0)
val font = loadFont("demo-data/fonts/IBMPlexMono-Regular.ttf", 16.0)

extend {
// Draw two moving circles

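For orientation, the hunk above shows the change this commit makes throughout: demo assets are loaded from the demo-data/ directory by a plain relative path instead of being resolved through resourceUrl and the classpath. A minimal sketch of a demo using the new layout; the asset file names come from this diff, the drawn content is illustrative, and the sketch assumes the program is launched from the repository root so the relative paths resolve.

import org.openrndr.application
import org.openrndr.draw.loadFont
import org.openrndr.draw.loadImage

fun main() = application {
    program {
        // Assets live in demo-data/ at the repository root; paths are relative
        // to the working directory, so launch from the repository root.
        val font = loadFont("demo-data/fonts/IBMPlexMono-Regular.ttf", 16.0)
        val image = loadImage("demo-data/images/pm5544.png")
        extend {
            drawer.image(image)
            drawer.fontMap = font
            drawer.text("assets loaded from demo-data/", 20.0, 20.0)
        }
    }
}

Plain file paths keep the demos easy to copy, at the cost of depending on the working directory; the earlier resourceUrl calls resolved the same assets from the classpath instead.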
@@ -3,7 +3,6 @@ package org.openrndr.extra.fx
import org.openrndr.draw.ColorFormat
import org.openrndr.draw.ColorType
import org.openrndr.resourceUrl
import java.net.URL

internal class FilterTools

@@ -1,9 +1,11 @@
import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.*
import org.openrndr.draw.ColorBuffer
import org.openrndr.draw.isolatedWithTarget
import org.openrndr.draw.loadFont
import org.openrndr.draw.renderTarget
import org.openrndr.extra.runway.*
import org.openrndr.math.smoothstep
import org.openrndr.resourceUrl

/**
 * This demonstrates an image reinterpretation effect.
@@ -20,24 +22,24 @@ fun main() = application {
val rt = renderTarget(256, 256) {
colorBuffer()
}
val font = loadFont(resourceUrl("/data/fonts/IBMPlexMono-Regular.ttf"), 256.0)
val font = loadFont("demo-data/fonts/IBMPlexMono-Regular.ttf", 256.0)
val alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
extend {
drawer.isolatedWithTarget(rt) {
drawer.clear(ColorRGBa.BLACK)
drawer.ortho(rt)
val it = seconds.toInt()
val t = seconds-it
drawer.fill = ColorRGBa.PINK.shade(smoothstep(0.0,0.2, t)*smoothstep(1.0, 0.8,t))
val t = seconds - it
drawer.fill = ColorRGBa.PINK.shade(smoothstep(0.0, 0.2, t) * smoothstep(1.0, 0.8, t))
drawer.fontMap = font
drawer.text(""+alphabet[seconds.toInt()], 64.0, 128.0+64)
drawer.text("" + alphabet[seconds.toInt()], 64.0, 128.0 + 64)
}
val result: BigBiGANResult =
runwayQuery("http://localhost:8000/query", BigBiGANQuery(rt.colorBuffer(0).toData()))
runwayQuery("http://localhost:8000/query", BigBiGANQuery(rt.colorBuffer(0).toData()))

val image = ColorBuffer.fromData(result.outputImage)
drawer.image(rt.colorBuffer(0))
drawer.image(image, 256.0 ,0.0)
drawer.image(image, 256.0, 0.0)
image.destroy()
}
}

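The smoothstep(0.0, 0.2, t) * smoothstep(1.0, 0.8, t) product above is a fade envelope over the fractional second t: the glyph fades in during the first 0.2 s of each second and fades out during the last 0.2 s. A self-contained sketch of the same envelope; the local smoothstep is a stand-in with the same shape as org.openrndr.math.smoothstep, and the loop only prints a few sample values.

// Hermite smoothstep with the same edge0/edge1 convention as GLSL and
// org.openrndr.math.smoothstep; reversed edges (edge0 > edge1) give a fade-out.
fun smoothstep(edge0: Double, edge1: Double, x: Double): Double {
    val t = ((x - edge0) / (edge1 - edge0)).coerceIn(0.0, 1.0)
    return t * t * (3.0 - 2.0 * t)
}

fun main() {
    for (i in 0..10) {
        val t = i / 10.0 // fractional part of `seconds` in the demo
        val fade = smoothstep(0.0, 0.2, t) * smoothstep(1.0, 0.8, t)
        println("t=%.1f fade=%.2f".format(t, fade))
    }
}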
@@ -2,7 +2,6 @@ import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.*
import org.openrndr.extra.runway.*
import org.openrndr.resourceUrl

/**
 * This demonstrates an image feedback effect. It starts from a single image.
@@ -20,12 +19,12 @@ fun main() = application {
val rt = renderTarget(256, 256) {
colorBuffer()
}
val startImage = loadImage(resourceUrl("/data/images/portrait.jpg"))
val startImage = loadImage("demo-data/images/portrait.jpg")

drawer.isolatedWithTarget(rt) {
drawer.ortho(rt)
drawer.clear(ColorRGBa.BLACK)
drawer.image(startImage, (rt.width - startImage.width)/2.0, (rt.height - startImage.height) / 2.0)
drawer.image(startImage, (rt.width - startImage.width) / 2.0, (rt.height - startImage.height) / 2.0)
}

extend {

@@ -37,9 +36,9 @@ fun main() = application {

drawer.isolatedWithTarget(rt) {
drawer.ortho(rt)
drawer.translate(image.width/2.0, image.height/2.0)
drawer.translate(image.width / 2.0, image.height / 2.0)
drawer.rotate(10.0)
drawer.translate(-image.width/2.0, -image.height/2.0)
drawer.translate(-image.width / 2.0, -image.height / 2.0)
drawer.drawStyle.colorMatrix = tint(ColorRGBa.WHITE.opacify(0.5))
drawer.image(image)
}

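The translate / rotate / translate triple above is the standard way to rotate an image about its own centre: move the origin to the centre, rotate, then move it back before drawing. A minimal sketch of just that transform; the image path is one this commit places in demo-data/, and the time-based angle is illustrative.

import org.openrndr.application
import org.openrndr.draw.loadImage

fun main() = application {
    program {
        val image = loadImage("demo-data/images/portrait.jpg")
        extend {
            // Rotate about the image centre instead of the window origin
            drawer.translate(image.width / 2.0, image.height / 2.0)
            drawer.rotate(seconds * 10.0) // degrees
            drawer.translate(-image.width / 2.0, -image.height / 2.0)
            drawer.image(image)
        }
    }
}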
@@ -2,10 +2,10 @@ import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.ColorBuffer
import org.openrndr.draw.loadFont
import org.openrndr.extra.runway.*
import org.openrndr.resourceUrl
import org.openrndr.extra.runway.AttnGANRequest
import org.openrndr.extra.runway.AttnGANResult
import org.openrndr.extra.runway.runwayQuery
import java.io.File
import java.net.URL

/**
 * This demonstrates a text to image network. It generates images from single words.
@@ -20,14 +20,14 @@ fun main() = application {

program {
val runwayHost = "http://localhost:8000/query"
val dictionary = URL("demo-data/dictionary/words.txt").readText().split("\n")
val words = File("demo-data/words/words.txt").readText().split("\n")
val font = loadFont("demo-data/fonts/IBMPlexMono-Regular.ttf", 72.0)
extend {
val text = dictionary.random()
val text = words.random()
val result: AttnGANResult = runwayQuery(runwayHost, AttnGANRequest(text))
val image = ColorBuffer.fromUrl(result.result)
drawer.fontMap = font
drawer.image(image, (width - image.width)/2.0, (height - image.height)/2.0)
drawer.image(image, (width - image.width) / 2.0, (height - image.height) / 2.0)
drawer.fill = ColorRGBa.PINK
drawer.text(text, 40.0, height / 2.0)
image.destroy()

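The demo above reads its word list with File(...).readText().split("\n"), which keeps empty entries if the file ends with a newline. A small sketch of a slightly more defensive loader; the trimming and filtering are an addition for illustration, not something the demo does, while the file path is the one introduced by this commit.

import java.io.File

// Load a newline-separated word list, dropping blank and whitespace-only lines
fun loadWordList(path: String): List<String> =
    File(path).readText()
        .split("\n")
        .map { it.trim() }
        .filter { it.isNotEmpty() }

fun main() {
    val words = loadWordList("demo-data/words/words.txt")
    println("loaded ${words.size} words, e.g. ${words.random()}")
}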
@@ -2,10 +2,10 @@ import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.ColorBuffer
import org.openrndr.draw.loadFont
import org.openrndr.extra.runway.*
import org.openrndr.resourceUrl
import org.openrndr.extra.runway.AttnGANRequest
import org.openrndr.extra.runway.AttnGANResult
import org.openrndr.extra.runway.runwayQuery
import java.io.File
import java.net.URL

/**
 * This demonstrates a text to image network. It generates images from simple sentences.
@@ -20,17 +20,17 @@ fun main() = application {

program {
val runwayHost = "http://localhost:8000/query"
val nouns = URL(resourceUrl("/data/dictionary/nouns.txt")).readText().split("\n")
val prepositions = URL(resourceUrl("/data/dictionary/prepositions.txt")).readText().split("\n")
val adjectives = URL(resourceUrl("/data/dictionary/adjectives.txt")).readText().split("\n")
val font = loadFont(resourceUrl("/data/fonts/IBMPlexMono-Regular.ttf"), 36.0)
val nouns = File("demo-data/words/nouns.txt").readText().split("\n")
val prepositions = File("demo-data/words/prepositions.txt").readText().split("\n")
val adjectives = File("demo-data/words/adjectives.txt").readText().split("\n")
val font = loadFont("demo-data/fonts/IBMPlexMono-Regular.ttf", 36.0)

extend {
val text = "a ${adjectives.random()} ${nouns.random()} ${prepositions.random()} a ${adjectives.random()} ${nouns.random()}"
val result: AttnGANResult = runwayQuery(runwayHost, AttnGANRequest(text))
val image = ColorBuffer.fromUrl(result.result)
drawer.fontMap = font
drawer.image(image, (width - image.width)/2.0, (height - image.height)/2.0)
drawer.image(image, (width - image.width) / 2.0, (height - image.height) / 2.0)
drawer.fill = ColorRGBa.PINK
drawer.text(text, 40.0, height - 40.0)
image.destroy()

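The prompt is assembled with a plain Kotlin string template over the three word lists. A tiny standalone version of that construction with hard-coded lists, so it runs without the demo-data files; the example words are made up.

fun main() {
    val adjectives = listOf("red", "quiet", "enormous")
    val nouns = listOf("house", "bird", "river")
    val prepositions = listOf("near", "under", "behind")

    // "a <adjective> <noun> <preposition> a <adjective> <noun>", as in the demo
    val text = "a ${adjectives.random()} ${nouns.random()} " +
            "${prepositions.random()} a ${adjectives.random()} ${nouns.random()}"
    println(text)
}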
@@ -1,8 +1,10 @@
import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.*
import org.openrndr.draw.ColorBuffer
import org.openrndr.draw.isolatedWithTarget
import org.openrndr.draw.loadImage
import org.openrndr.draw.renderTarget
import org.openrndr.extra.runway.*
import org.openrndr.resourceUrl

/**
 * This demonstrates the body estimation model of DensePose
@@ -18,12 +20,12 @@ fun main() = application {
val rt = renderTarget(512, 512) {
colorBuffer()
}
val startImage = loadImage(resourceUrl("/data/images/peopleCity01.jpg"))
val startImage = loadImage("demo-data/images/peopleCity01.jpg")

drawer.isolatedWithTarget(rt) {
drawer.ortho(rt)
drawer.clear(ColorRGBa.BLACK)
drawer.image(startImage, (rt.width - startImage.width)/2.0, (rt.height - startImage.height) / 2.0)
drawer.image(startImage, (rt.width - startImage.width) / 2.0, (rt.height - startImage.height) / 2.0)
}

extend {

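Like the feedback demo earlier, this demo seeds its render target by drawing the start image once, centred with (target - image) / 2 offsets. A self-contained sketch of that seeding step followed by simply displaying the target; every call used here also appears in this diff, and the 512 x 512 target matches the DensePose demo.

import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.isolatedWithTarget
import org.openrndr.draw.loadImage
import org.openrndr.draw.renderTarget

fun main() = application {
    program {
        val rt = renderTarget(512, 512) { colorBuffer() }
        val startImage = loadImage("demo-data/images/peopleCity01.jpg")

        // Draw the start image once, centred in the off-screen target
        drawer.isolatedWithTarget(rt) {
            drawer.ortho(rt)
            drawer.clear(ColorRGBa.BLACK)
            drawer.image(
                startImage,
                (rt.width - startImage.width) / 2.0,
                (rt.height - startImage.height) / 2.0
            )
        }

        extend {
            // Show what ended up in the render target
            drawer.image(rt.colorBuffer(0))
        }
    }
}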
@@ -5,7 +5,6 @@ import org.openrndr.extra.fx.transform.FlipVertically
import org.openrndr.extra.runway.DeOldifyRequest
import org.openrndr.extra.runway.DeOldifyResponse
import org.openrndr.extra.runway.runwayQuery
import org.openrndr.ffmpeg.ScreenRecorder
import org.openrndr.ffmpeg.VideoPlayerFFMPEG

/**
@@ -22,7 +22,7 @@ fun main() = application {
camera.play()
val flip = FlipVertically()

val font = loadFont(resourceUrl("/data/fonts/IBMPlexMono-Regular.ttf"), 12.0)
val font = loadFont("demo-data/fonts/IBMPlexMono-Regular.ttf", 12.0)
camera.newFrame.listen {
flip.apply(it.frame, image)
}

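The webcam demos share the capture pattern shown above: a FlipVertically filter copies each new camera frame into a colour buffer, which the draw loop then displays. A hedged sketch of that pattern; only play(), newFrame and FlipVertically appear in this diff, so the fromDevice() setup, the 640 x 480 buffer size and the camera.draw call are assumptions based on common OPENRNDR usage.

import org.openrndr.application
import org.openrndr.draw.colorBuffer
import org.openrndr.extra.fx.transform.FlipVertically
import org.openrndr.ffmpeg.VideoPlayerFFMPEG

fun main() = application {
    program {
        // Assumed device setup; the diff only shows play() and newFrame
        val camera = VideoPlayerFFMPEG.fromDevice()
        camera.play()

        val flip = FlipVertically()
        val image = colorBuffer(640, 480) // assumed capture size

        // Copy (and vertically flip) each decoded camera frame into `image`
        camera.newFrame.listen {
            flip.apply(it.frame, image)
        }

        extend {
            camera.draw(drawer) // keeps the decoder running; draws the raw frame
            drawer.image(image) // draw the flipped copy on top
        }
    }
}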
@@ -26,11 +26,11 @@ fun main() = application {
val attnHost = "http://localhost:8001/query"
val im2txtHost = "http://localhost:8000/query"

val nouns = URL(resourceUrl("/data/dictionary/nouns.txt")).readText().split("\n")
val prepositions = URL(resourceUrl("/data/dictionary/prepositions.txt")).readText().split("\n")
val adjectives = URL(resourceUrl("/data/dictionary/adjectives.txt")).readText().split("\n")
val nouns = File("demo-data/words/nouns.txt").readText().split("\n")
val prepositions = File("demo-data/words/prepositions.txt").readText().split("\n")
val adjectives = File("demo-data/words/adjectives.txt").readText().split("\n")

val font = loadFont("data/fonts/IBMPlexMono-Regular.ttf", 36.0)
val font = loadFont("demo-data/fonts/IBMPlexMono-Regular.ttf", 36.0)

var text = "a ${adjectives.random()} ${nouns.random()} ${prepositions.random()} a ${adjectives.random()} ${nouns.random()}"

@@ -27,7 +27,7 @@ fun main() = application {
camera.play()
val flip = FlipVertically()

val font = loadFont("data/fonts/IBMPlexMono-Regular.ttf", 24.0)
val font = loadFont("demo-data/fonts/IBMPlexMono-Regular.ttf", 24.0)
camera.newFrame.listen {
flip.apply(it.frame, image)
}

@@ -20,9 +20,9 @@ fun main() = application {
}

program {
val image = loadImage("data/images/pm5544.png")
val image = loadImage("demo-data/images/pm5544.png")

val font = loadFont("data/fonts/IBMPlexMono-Regular.ttf", 24.0)
val font = loadFont("demo-data/fonts/IBMPlexMono-Regular.ttf", 24.0)
extend {
drawer.fontMap = font
val response: Im2txtResult =

@@ -30,7 +30,7 @@ fun main() = application {
camera.play()
val flip = FlipVertically()

val font = loadFont("data/fonts/IBMPlexMono-Regular.ttf", 24.0)
val font = loadFont("demo-data/fonts/IBMPlexMono-Regular.ttf", 24.0)
camera.newFrame.listen {
flip.apply(it.frame, image)
}

@@ -1,11 +1,12 @@
import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.loadFont
import org.openrndr.extra.runway.*
import org.openrndr.ffmpeg.ScreenRecorder
import org.openrndr.resourceUrl
import org.openrndr.extra.runway.Gpt2Request
import org.openrndr.extra.runway.Gpt2Result
import org.openrndr.extra.runway.runwayQuery
import org.openrndr.shape.Rectangle
import org.openrndr.text.Writer
import java.io.File
import java.net.URL

/**
@@ -23,13 +24,13 @@ fun main() = application {

val runwayHost = "http://localhost:8000/query"

val nouns = URL(resourceUrl("/data/dictionary/nouns.txt")).readText().split("\n")
val prepositions = URL(resourceUrl("/data/dictionary/prepositions.txt")).readText().split("\n")
val adjectives = URL(resourceUrl("/data/dictionary/adjectives.txt")).readText().split("\n")
val nouns = File("demo-data/words/nouns.txt").readText().split("\n")
val prepositions = File("demo-data/words/prepositions.txt").readText().split("\n")
val adjectives = File("demo-data/words/adjectives.txt").readText().split("\n")

val font = loadFont(resourceUrl("/data/fonts/IBMPlexMono-Regular.ttf"), 36.0)
val font = loadFont("demo-data/fonts/IBMPlexMono-Regular.ttf", 36.0)

val promptFont = loadFont(resourceUrl("/data/fonts/IBMPlexMono-Regular.ttf"), 24.0)
val promptFont = loadFont("demo-data/fonts/IBMPlexMono-Regular.ttf", 24.0)

extend {
val prompt = "a ${adjectives.random()} ${nouns.random()} ${prepositions.random()} a ${adjectives.random()} ${nouns.random()}"

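GPT-2 returns whole paragraphs rather than single labels, which is presumably why this demo imports Rectangle and Writer: Writer lays text out inside a bounding box with wrapping. A hedged sketch of that layout step in isolation; the box size and sample string are illustrative, and the Writer calls (box, newLine, text) follow typical OPENRNDR usage rather than anything shown in this diff.

import org.openrndr.application
import org.openrndr.color.ColorRGBa
import org.openrndr.draw.loadFont
import org.openrndr.shape.Rectangle
import org.openrndr.text.Writer

fun main() = application {
    program {
        val font = loadFont("demo-data/fonts/IBMPlexMono-Regular.ttf", 24.0)
        extend {
            drawer.fontMap = font
            drawer.fill = ColorRGBa.PINK

            // Lay longer text out inside a margin box, wrapping at its edge
            val writer = Writer(drawer)
            writer.box = Rectangle(40.0, 40.0, width - 80.0, height - 80.0)
            writer.newLine()
            writer.text("a generated paragraph would be laid out here, wrapping at the box edge")
        }
    }
}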
Binary file not shown.