In statistics, linear regression is a linear approach to modelling the relationship between a scalar response and one or more explanatory variables. Watch the YouTube video by Daniel Shiffman here.
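The sketch below fits the familiar line equation to the clicked points by gradient descent on a mean-squared-error loss. In symbols, matching the predict() and loss() functions further down:

\hat{y}_i = m x_i + b, \qquad L(m, b) = \frac{1}{N} \sum_{i=1}^{N} (\hat{y}_i - y_i)^2

Each frame, the SGD optimizer nudges m and b one small step down the gradient of L with respect to both parameters.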
let canvas;
let x_vals = [];
let y_vals = [];
let m, b;
const learningRate = 0.5;
let optimizer;

async function setup() {
  canvas = createCanvas(windowWidth, windowHeight);
  await pSetup();
  await loadScript("https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@1.0.0/dist/tf.min.js");
  optimizer = tf.train.sgd(learningRate);
  m = tf.variable(tf.scalar(random(1)));
  b = tf.variable(tf.scalar(random(1)));
}
function draw() {
  // Take one gradient-descent step on the current data
  tf.tidy(() => {
    if (x_vals.length > 0) {
      const ys = tf.tensor1d(y_vals);
      optimizer.minimize(() => loss(predict(x_vals), ys));
    }
  });
  background(0);
  // Draw the clicked data points
  stroke(255);
  strokeWeight(8);
  for (let i = 0; i < x_vals.length; i++) {
    let px = map(x_vals[i], 0, 1, 0, width);
    let py = map(y_vals[i], 0, 1, height, 0);
    point(px, py);
  }
  // Draw the fitted line from x = 0 to x = 1
  const lineX = [0, 1];
  const ys = tf.tidy(() => predict(lineX));
  let lineY = ys.dataSync();
  ys.dispose();
  let x1 = map(lineX[0], 0, 1, 0, width);
  let x2 = map(lineX[1], 0, 1, 0, width);
  let y1 = map(lineY[0], 0, 1, height, 0);
  let y2 = map(lineY[1], 0, 1, height, 0);
  strokeWeight(2);
  line(x1, y1, x2, y2);
  // console.log(tf.memory().numTensors);
}
function loss(pred, labels) {
  // Mean squared error between predictions and labels
  return pred.sub(labels).square().mean();
}

function predict(x) {
  const xs = tf.tensor1d(x);
  // y = mx + b
  const ys = xs.mul(m).add(b);
  return ys;
}
function mousePressed() {
  // Normalise the clicked position to the 0..1 range used for training
  let x = map(mouseX, 0, width, 0, 1);
  let y = map(mouseY, 0, height, 1, 0);
  x_vals.push(x);
  y_vals.push(y);
}

function keyPressed() {
  if (key == "f") {
    fullScreenToggle();
  }
}
This example uses a model trained with Google's Teachable Machine.
1. Go to the Teachable Machine webpage and click 'Get Started'.
2. Choose the type of classification, e.g. an image project.
3. Name the two classes, e.g. Cup and noCup.
4. Record a bunch of samples (images, sounds, or poses, depending on the project type) with either a cup or no cup.
5. Train your classifier.
6. When training is done, press 'Export Model'.
7. Press the upload model button.
8. Copy the generated code and change the model URL at the top of the sketch below.
9. Run your code.
let canvas;
let cam, imageClassifier;
let imageModelURL = "https://teachablemachine.withgoogle.com/models/XHOPUULzb/";

async function setup() {
  canvas = createCanvas(windowWidth, windowHeight);
  await pSetup();
  await loadScript("portal/P5ImageClassifier.js");
  // might work better with 320, 240
  cam = setupWebcamera(false, undefined, undefined, true);
  imageClassifier = new P5ImageClassifier({ model: imageModelURL, video: cam, backend: 'webgl', topK: 3 });
  imageClassifier.init().then(() => imageClassifier.start());
  textSize(20);
  fill(255);
}
function draw() {
  background(0);
  image(cam, 0, 0);
  imageClassifier.drawResults(16, height - 60, 20);
  if (imageClassifier.hasResult()) {
    // "Mads Med kaffe" is Danish for "Mads with coffee"
    if (imageClassifier.getBest().label == "Mads Med kaffe") {
      noFill();
      stroke("red");
      strokeWeight(5);
      rect(20, 20, cam.width - 20, cam.height - 20);
    }
  }
}

function keyPressed() {
  if (key == "f") {
    fullScreenToggle();
  }
}
Todo: Only image classification has been made so far; pose and audio classifiers still need to be made.
Webcam Image Classification using MobileNet and p5.js
This example uses a then-callback to start the classifier once it has finished initialising. The list of classes MobileNet can recognise: https://github.com/ml5js/ml5-library/blob/master/src/utils/IMAGENET_CLASSES.js
let canvas;
let cam, imageClassifier;

async function setup() {
  canvas = createCanvas(windowWidth, windowHeight);
  await pSetup();
  await loadScript("portal/P5ImageClassifier.js");
  // might work better with 320, 240
  cam = setupWebcamera(false, undefined, undefined, true);
  imageClassifier = new P5ImageClassifier({ model: 'MobileNet', video: cam, backend: 'webgl', topK: 3 });
  imageClassifier.init().then(() => imageClassifier.start());
  textSize(20);
  fill(255);
}
function draw() {
  background(0);
  image(cam, 0, 0);
  imageClassifier.drawResults(16, height - 60, 20);
  if (imageClassifier.hasResult()) {
    if (imageClassifier.getBest().label == "teapot") {
      noFill();
      stroke("red");
      strokeWeight(5);
      rect(20, 20, cam.width - 20, cam.height - 20);
    }
  }
}

function keyPressed() {
  if (key == "f") {
    fullScreenToggle();
  }
}
Webcam Image Classification using Darknet and p5.js
let canvas;
let cam, imageClassifier;

async function setup() {
  canvas = createCanvas(windowWidth, windowHeight);
  await pSetup();
  await loadScript("portal/P5ImageClassifier.js");
  // might work better with 320, 240
  cam = setupWebcamera(false, undefined, undefined, true);
  imageClassifier = new P5ImageClassifier({ model: 'Darknet', video: cam, backend: 'webgl', topK: 3 });
  imageClassifier.init().then(() => imageClassifier.start());
  textSize(20);
  fill(255);
}
function draw() {
  background(0);
  image(cam, 0, 0);
  imageClassifier.drawResults(16, height - 60, 20);
  if (imageClassifier.hasResult()) {
    if (imageClassifier.getBest().label == "teapot") {
      noFill();
      stroke("red");
      strokeWeight(5);
      rect(20, 20, cam.width - 20, cam.height - 20);
    }
  }
}

function keyPressed() {
  if (key == "f") {
    fullScreenToggle();
  }
}
Webcam Object Detection using COCO-SSD and p5.js
Object detection models have been trained to recognise objects from a huge library of labelled images. Detections from the COCO-SSD model can be drawn with the following syntax:
for (let i = 0; i < detections.length; i++) {
  let object = detections[i];
  rect(object.x, object.y, object.width, object.height);
  text(object.label, object.x + 10, object.y + 24);
}
The for loop runs through the objects in the detections array. For each object, it draws a rectangle around it and adds a text label. Be aware that the model takes a while to load.
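As a small sketch of what you can do with the detections yourself (assuming each detection object carries the label, x, y, width, and height fields used above; countLabel is a helper made up for this example), you could count how many objects of one kind are currently in frame:

// Hypothetical helper: counts detections with a given label.
function countLabel(detections, label) {
  let count = 0;
  for (let i = 0; i < detections.length; i++) {
    if (detections[i].label == label) {
      count++;
    }
  }
  return count;
}

You could call it from the onDetections hook in the sketch below, e.g. console.log(countLabel(dets, "person"));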
let canvas;
let cam, objectDetector;

async function setup() {
  canvas = createCanvas(windowWidth, windowHeight);
  await pSetup();
  await loadScript("portal/P5ObjectDetector.js");
  // might work better with 320, 240
  cam = setupWebcamera(false, undefined, undefined, true);
  objectDetector = await new P5ObjectDetector({
    model: 'cocossd',
    video: cam,
    backend: 'webgl',
    scoreThreshold: 0.5,
    onDetections: (dets) => {
      // your hook for side effects
      // console.log(dets);
    }
  }).init();
  objectDetector.start();
  textSize(20);
  fill(255);
}
function draw() {
  background(0);
  image(cam, 0, 0);
  objectDetector.drawDetections(0, 0, true);
  if (objectDetector.hasResult()) {
    // React to the latest detections here
  }
}

function keyPressed() {
  if (key == "f") {
    fullScreenToggle();
  }
}
Handpose is a machine learning model that enables palm detection and finger tracking of the hand skeleton in the browser. It can detect a maximum of one hand at a time and provides 2D and 3D hand keypoints that describe the current locations of points on the palm and fingers.
let canvas;
let cam, handPose;

async function setup() {
  canvas = createCanvas(windowWidth, windowHeight);
  await pSetup();
  await loadScript("portal/handPose.js");
  cam = await setupWebcamera(false, 640, 480, true);
  handPose = await new HandPose({
    video: cam,
    backend: "webgl",
    videoIsFlipped: true,
    onResults: (hands) => {
      // console.log(hands);
    },
  }).init();
  handPose.start();
  textSize(20);
  fill(255);
}
function draw() {
  background(0);
  image(cam, 0, 0);
  handPose?.drawHands(0, 0);
  if (handPose.hasResult()) {
    let tipPos = handPose.getLeftHand().index_finger_tip;
    ellipse(tipPos.x, tipPos.y, 20, 20);
  }
}

function keyPressed() {
  if (key == "f") {
    fullScreenToggle();
  }
}
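As a sketch of what the keypoints enable, you could detect a 'pinch' by measuring the distance between two fingertips. This assumes the wrapper also exposes a thumb_tip keypoint alongside index_finger_tip (matching MediaPipe's keypoint names) and that getLeftHand() can come back empty when no left hand is tracked; isPinching is a name made up for this example:

// Hypothetical pinch detector: assumes a thumb_tip keypoint exists like index_finger_tip.
function isPinching() {
  if (!handPose.hasResult()) return false;
  const hand = handPose.getLeftHand();
  if (!hand || !hand.thumb_tip || !hand.index_finger_tip) return false;
  const d = dist(
    hand.thumb_tip.x, hand.thumb_tip.y,
    hand.index_finger_tip.x, hand.index_finger_tip.y
  );
  return d < 40; // pixels; tune to your camera resolution
}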
You need to get a password from Mads or create your own API key.
Put this code before your setup function:
let urlToSketch = "https://editor.p5js.org/hobye/sketches/XOYHX3qgV";
let apiKeyEncryptedGpt12 =
"U2FsdGVkX18ufo+Jv5eV1uiVVu23Jjvr8SaHfqG2rnsUq75hmr1av/B4KStyhTJtJwMgyyM6CP9gKXuUEu8F2m52Ey+wyLSiuI34pcMYOnPOVrngAAE3EMJg1Sx52sdns3JzqQHJgma6chold+TcfgeYqG/4O8wdRiKLz64Ic+v9uB+xDrzxJ2Cazu4En9yWPTKskgvccEn3ls0+zVGacW1zLaNyJXmzm+yHE0mkro+a/5lWzZFRT6UX6+HVEgqi";
let apiKey = "";
let gpt;
Put this code in your setup function:
await loadScript("portal/GptClient.js");
apiKey = storedDecrypt({ apiKeyEncryptedGpt12 });
gpt = new GptClient({
  apiKey,
  model: "gpt-4o-mini",
  instructions:
    "You answer questions clearly and as simply as possible. Preferably one word.",
});
await gpt.ask("What is the capital of France?");
// Now read the result from the client
if (gpt.error) {
  console.log("Error:", gpt.error);
} else if (gpt.latestObject) {
  console.log("Answer text:", gpt.latestObject.text);
}
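A minimal way to get the answer onto the canvas (a sketch only; answer is a variable made up for this example, and it reuses the gpt.error and gpt.latestObject fields shown above):

let answer = "";

function draw() {
  background(0);
  fill(255);
  textSize(20);
  text(answer, 20, 40);
}

async function mousePressed() {
  await gpt.ask("What is the capital of France?");
  answer = gpt.error ? "Error: " + gpt.error : gpt.latestObject.text;
}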
You need to get a password from Mads or create your own API key.
Put this code before your setup function:
let urlToSketch = "https://editor.p5js.org/hobye/sketches/XOYHX3qgV";
let apiKeyEncryptedGpt12 =
"U2FsdGVkX18ufo+Jv5eV1uiVVu23Jjvr8SaHfqG2rnsUq75hmr1av/B4KStyhTJtJwMgyyM6CP9gKXuUEu8F2m52Ey+wyLSiuI34pcMYOnPOVrngAAE3EMJg1Sx52sdns3JzqQHJgma6chold+TcfgeYqG/4O8wdRiKLz64Ic+v9uB+xDrzxJ2Cazu4En9yWPTKskgvccEn3ls0+zVGacW1zLaNyJXmzm+yHE0mkro+a/5lWzZFRT6UX6+HVEgqi";
let apiKey = "";
let gpt;
Put this code in your setup function:
await loadScript("portal/GptClient.js");
apiKey = storedDecrypt({ apiKeyEncryptedGpt12 });
const schema = [
  {
    name: "color_response",
    description: "Return the color mentioned in the text.",
    parameters: {
      type: "object",
      properties: {
        color: { type: "string" }
      },
      required: ["color"]
    }
  }
];
gpt = new GptClient({
  apiKey,
  model: "gpt-4o-mini",
  instructions: "Extract the color mentioned in the user's sentence.",
  functionSchemas: schema,
  functionName: "color_response"
});
const res = await gpt.ask("I like the color blue.");
console.log(res); // { color: "blue" } or { error: "..." }
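One way to use the structured result in a sketch (a sketch only; it assumes GPT returns a color word that p5's color() can parse, which is not guaranteed):

const res2 = await gpt.ask("My bike is bright orange.");
if (!res2.error && res2.color) {
  background(color(res2.color)); // e.g. color("orange")
}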
You need to get a password from Mads or create your own API key.
Put this code before your setup function:
let urlToSketch = "https://editor.p5js.org/hobye/sketches/XOYHX3qgV";
let apiKeyEncryptedGpt12 =
"U2FsdGVkX18ufo+Jv5eV1uiVVu23Jjvr8SaHfqG2rnsUq75hmr1av/B4KStyhTJtJwMgyyM6CP9gKXuUEu8F2m52Ey+wyLSiuI34pcMYOnPOVrngAAE3EMJg1Sx52sdns3JzqQHJgma6chold+TcfgeYqG/4O8wdRiKLz64Ic+v9uB+xDrzxJ2Cazu4En9yWPTKskgvccEn3ls0+zVGacW1zLaNyJXmzm+yHE0mkro+a/5lWzZFRT6UX6+HVEgqi";
let apiKey = "";
let gpt;
let img;
let resultText = "";
Put this code in your setup function:
await loadScript("portal/GptClient.js");
apiKey = storedDecrypt({ apiKeyEncryptedGpt12 });
// --- Step 1. Define the structured response schema ---
const functionSchemas = [
  {
    name: "color_response",
    description: "Return the dominant color of the image as a word.",
    parameters: {
      type: "object",
      properties: { color: { type: "string" } },
      required: ["color"],
    },
  },
];
// --- Step 2. Make the GptClient ---
gpt = new GptClient({
  apiKey,
  model: "gpt-4o-mini",
  instructions:
    "Look carefully at the user's image and describe the main color. Respond using the provided color_response tool.",
  functionSchemas,
  functionName: "color_response",
});
// --- Step 3. Make a red image using createGraphics() ---
img = createGraphics(256, 256);
img.background(255, 0, 0); // bright red square
const block = await gpt._makeImageBlock(img);
console.log("image block going to API:", block);
// --- Step 4. Ask GPT about the image ---
const res = await gpt.ask("What color is this image?", img);
console.log("GPT result:", res);
// --- Step 5. Read the result ---
if (res.error) {
  resultText = "Error: " + res.error;
} else if (res.color) {
  resultText = "Detected color: " + res.color;
} else if (res.text) {
  resultText = "Answer: " + res.text;
} else {
  resultText = "Unknown response";
}