3. Android Mode


Content

1. Using Processing's Android Mode
2. Media-specific interactions
3. Touch gestures
4. Accessing smartphone sensors
5. Using the keyboard to type messages
6. Detecting markers
7. Programming a cam effect
8. Android & Virtual Reality: Processing for Google Cardboard




1. Using Processing's Android Mode to develop for mobile phones

Setup



Hello Android / simple interaction

Tutorials: Processing-Website

Except for some limitations, Android Mode offers the whole set of commands and functions of Processing's Java Mode. In addition, there are some commands that are specific to Processing running on a mobile device: Link.

void setup() {
  fullScreen();
  background(10, 40, 10);
}

void draw() {
  // the faster the finger moves, the thinner the stroke gets
  float heaviness = 30 - (abs(mouseX - pmouseX) + abs(mouseY - pmouseY));
  heaviness = constrain(heaviness, 6, 30);
  strokeWeight(heaviness);
  stroke(150, mouseY, mouseX, 100);
  line(pmouseX, pmouseY, mouseX, mouseY);
}

The very same variables that we used to detect the mouse position (mouseX, mouseY) are now used to detect the position of a finger touching the display.
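
Android Mode also gives direct access to multitouch data. A minimal sketch, assuming Android Mode's built-in touches array (each entry carries the x and y coordinates of one finger; check the Android Mode reference for the exact fields):

// Draw a circle under every finger currently touching the display.
// touches is Android Mode's built-in array of touch pointers.
void setup() {
  fullScreen();
  noStroke();
  fill(255, 120);
}

void draw() {
  background(10, 40, 10);
  for (int i = 0; i < touches.length; i++) {
    ellipse(touches[i].x, touches[i].y, 120, 120);
  }
}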




2. Media-specific interactions (touch gestures) and sensor data

In contrast to laptops and desktop computers, which traditionally rely on mouse, keyboard, and trackpad for interacting with a digital system, mobile devices offer different input channels and sensor data:

Ketai library

A very convenient way to read sensors, detect gestures, and access the camera in Processing is provided by the Ketai library: Link



3. Touch gestures with the Ketai library

Detecting touch gestures with Ketai

The Ketai library lets us detect more complex multitouch interactions than the already familiar mousePressed() function (see Reference):

Interacting with gestures. Not all of the gestures shown above are implemented in Ketai or Android.



import android.view.MotionEvent;
import ketai.ui.*;

KetaiGesture gesture;

int amount = 10;
PImage[] images = new PImage[amount];
float radius = 1200;
float angleX = 0;
float angleY = 0;
float angleZ = 0;


void setup(){
  fullScreen(P3D);
  orientation(LANDSCAPE);
  imageMode(CENTER);
  gesture = new KetaiGesture(this);
  for (int i = 0; i < images.length; i++){
    images[i] = loadImage(i+".jpeg");   // filenames are 0.jpeg, 1.jpeg, …
  }
}

void draw(){
  background(10,10,30);
  translate(width/2,0,0);
  rotateX(angleX);
  rotateY(angleY);
  rotateZ(angleZ);
  drawPics();
}

// called by KetaiGesture on a two-finger pinch; d is the change in distance
void onPinch(float x, float y, float d){
  radius = constrain(radius+d, 10, 2000);
}

// called on a two-finger rotation; ang is the rotation delta in radians
void onRotate(float x, float y, float ang){
  angleZ += ang;
}

// called on a flick; (px, py) is the start and (x, y) the end of the swipe
void onFlick(float x, float y, float px, float py, float v){
  float distX = px-x;
  float distY = y-py;
  angleY += distX/1000;
  angleX += distY/1000;
}

void drawPics(){
  float angle = 0;
  for (int i = 0; i < images.length; i++){
     float stepSize = TWO_PI / images.length;      // set stepsize for rotation
     angle = i*stepSize;
     float px = 0 + (radius * cos(angle));
     float py = 0 ;
     float pz = 0 + (radius * sin(angle));
    
     pushMatrix();
     translate(px, py, pz);
     rotateY(-angle+(PI/1.65));
     image(images[i], 0, 0);
     popMatrix();
  }
}

public boolean surfaceTouchEvent(MotionEvent event) {
  super.surfaceTouchEvent(event);          //call to keep mouseX, mouseY, etc updated
  return gesture.surfaceTouchEvent(event);  //forward event to class for processing
}



Exercise (45 minutes)
Write a sketch that lets you paint structures on your display using the gestures "double tap", "pinch" and "rotate".


4. Accessing smartphone sensors with ketai library

Ketai library

Checking available sensors

import ketai.sensors.*;

KetaiSensor sensor;
boolean hasAccel = false; 
boolean hasGyro = false;
PVector dataAccel = new PVector();
PVector dataGyro = new PVector();

void setup() {
  fullScreen();  
  sensor = new KetaiSensor(this);
  sensor.start();
  
  if (sensor.isAccelerometerAvailable()) {
    hasAccel = true;
    println("Device has accelerometer");
  }
  if (sensor.isGyroscopeAvailable()) {
    hasGyro = true;
    println("Device has gyroscope");
  }  
  
  noStroke();
}

void draw() {
  background(255);
  float h = height/6;
  float y = 0;
  translate(width/2, 0);
  if (hasAccel) {
    fill(#C63030);
    rect(0, y, map(dataAccel.x, -10, +10, -width/2, +width/2), h);
    y += h;
    rect(0, y, map(dataAccel.y, -10, +10, -width/2, +width/2), h);
    y += h;
    rect(0, y, map(dataAccel.z, -10, +10, -width/2, +width/2), h);
    y += h;
  }
  if (hasGyro) {
    fill(#30C652);
    rect(0, y, map(dataGyro.x, -10, +10, -width/2, +width/2), h);
    y += h;
    rect(0, y, map(dataGyro.y, -10, +10, -width/2, +width/2), h);
    y += h;
    rect(0, y, map(dataGyro.z, -10, +10, -width/2, +width/2), h);
  }  
}

void onAccelerometerEvent(float x, float y, float z) {
  dataAccel.set(x, y, z);
}

void onGyroscopeEvent(float x, float y, float z) {
  dataGyro.set(x, y, z);
}



Accelerometer

import ketai.sensors.*;

KetaiSensor sensor;
float accelerometerX, accelerometerY, accelerometerZ;

void setup() {
  fullScreen();  
  sensor = new KetaiSensor(this);
  sensor.start();
  textAlign(CENTER, CENTER);
  textSize(displayDensity * 36);
}

void draw() {
  background(78, 93, 75);
  text("Accelerometer: \n" +
    "x: " + nfp(accelerometerX, 1, 3) + "\n" +
    "y: " + nfp(accelerometerY, 1, 3) + "\n" +
    "z: " + nfp(accelerometerZ, 1, 3), 0, 0, width, height);
}

void onAccelerometerEvent(float x, float y, float z) {
  accelerometerX = x;
  accelerometerY = y;
  accelerometerZ = z;
}



Gyroscope


import ketai.sensors.*;

KetaiSensor sensor;
float rotationX, rotationY, rotationZ;

void setup() {
  fullScreen(P3D);
  orientation(LANDSCAPE);
  sensor = new KetaiSensor(this);
  sensor.start();
  rectMode(CENTER);
  fill(180);
}

void draw() {
  background(255);
  translate(width/2, height/2);
  rotateZ(rotationZ);
  rotateY(rotationX);
  rotateX(rotationY);
  box(height * 0.3);
}

void onGyroscopeEvent(float x, float y, float z) {
  rotationX += 0.1 * x;
  rotationY += 0.1 * y;
  rotationZ += 0.1 * z;
}



Magnetic sensor
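
The magnetic field sensor follows the same pattern as the accelerometer and gyroscope above. A minimal sketch, assuming Ketai provides an onMagneticFieldEvent() callback (check the Ketai reference to verify):

import ketai.sensors.*;

KetaiSensor sensor;
PVector dataMag = new PVector();

void setup() {
  fullScreen();
  sensor = new KetaiSensor(this);
  sensor.start();
  textAlign(CENTER, CENTER);
  textSize(displayDensity * 36);
}

void draw() {
  background(0);
  // magnetometer values are given in microtesla
  text("Magnetic field: \n" +
    "x: " + nfp(dataMag.x, 1, 2) + "\n" +
    "y: " + nfp(dataMag.y, 1, 2) + "\n" +
    "z: " + nfp(dataMag.z, 1, 2), 0, 0, width, height);
}

void onMagneticFieldEvent(float x, float y, float z) {
  dataMag.set(x, y, z);
}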



5. Using the keyboard to type messages

Android Mode provides two functions to show and hide the software keyboard:

openKeyboard()
closeKeyboard()

String text = "touch the screen to type something";
boolean keyboard = false;

void setup() {
  fullScreen();
  textFont(createFont("Monospaced", 25 * displayDensity));
  textAlign(CENTER);
  fill(100);
}

void draw() {
  background(200);
  text(text, 0, 20, width, height - 40);
}

void keyReleased() {
  if (key == DELETE || key == BACKSPACE) {
    // remove the last character (but guard against an empty string)
    if (text.length() > 0) text = text.substring(0, text.length() - 1);
  } else {
    text += key;
  }
}

void mouseReleased() {
  if (!keyboard) {
    text = "";
    openKeyboard();
    keyboard = true;
  } else {
    closeKeyboard();
    keyboard = false;
  }
}



6. Detecting markers with your Android phone

https://www.mns.kyutech.ac.jp/~hanazawa/education/AR/AR1/

Ketai library for the camera
NyARToolkit for marker detection

If we want to activate the camera of our mobile phone, we have to grant our sketch the appropriate permission (in the PDE: Android → Sketch Permissions → CAMERA):


import ketai.camera.*;
import jp.nyatla.nyar4psg.*;

KetaiCamera cam;
PImage img;
MultiMarker nya;

void setup() {
  fullScreen(P3D);
  imageMode(CENTER);
  cam = new KetaiCamera(this, 640, 480, 24);
  img = new PImage(640, 480);        // buffer that is handed to the marker detector
  nya = new MultiMarker(this, 640, 480, "camera_para.dat", NyAR4PsgConfig.CONFIG_PSG);
  nya.addARMarker("patt.hiro", 80);  // the classic "Hiro" pattern, 80 mm wide
  cam.start();
}

void onCameraPreviewEvent(){
  cam.read();
}

void draw() {
  image(cam, width/2, height/2);
  img.copy(cam, 0, 0, 640, 480, 0, 0, 640, 480);
  nya.detect(img);

  if (nya.isExistMarker(0)) {        // marker 0 was found in the current frame
    nya.beginTransform(0);           // switch into the marker's coordinate system
    fill(0, 0, 255);
    box(40);
    nya.endTransform();
  }
}



7. Programming a cam effect for smartphones and saving the image

import ketai.camera.*;


KetaiCamera cam;
void setup() {
  orientation(LANDSCAPE);
  imageMode(CENTER);
  cam = new KetaiCamera(this, 1280, 720, 24);
}

void onCameraPreviewEvent(){
  cam.read();
}

void draw() {
  if(cam.isStarted())
    image(cam, width/2, height/2);   // show the current camera frame
  else
    cam.start();                     // first call of draw(): start the camera
}

Now we can adapt the image-manipulation code from part one of the course to program an effect for our mobile display.

import ketai.camera.*;

KetaiCamera cam;

float resolutionX = 30;
float resolutionY = 30;

void setup() {
  orientation(LANDSCAPE);
  imageMode(CENTER);
  cam = new KetaiCamera(this, 1280, 720, 24);
}

void onCameraPreviewEvent(){
  cam.read();
}

void draw() {
  if(cam.isStarted()){
    image(cam, width/2, height/2);
    for(int y = 0; y < height; y += resolutionY){
      for(int x = 0; x < width; x += resolutionX){
        color c = cam.get(x, y);     // sample the camera image at (x, y)
        fill(c);
        noStroke();
        ellipse(x, y, resolutionX, resolutionY);
      }
    }
  } else {
    cam.start();
  }
}


Exercise (30 minutes)
Program your own visual effect for your mobile camera. Make the parameters of its visual output (e.g. the radius of the circles) controllable by gestures.

You know how to do this.

If we want to save a camera image to the SD storage, we simply add a mouse event that triggers the savePhoto() function provided by the Ketai library. Then we add the captured image to Android's media library with Ketai's addToMediaLibrary() function.




void mousePressed() {
  cam.savePhoto();
}

void onCameraPreviewEvent(){
  cam.read();
}

void onSavePhotoEvent(String filename){
  cam.addToMediaLibrary(filename);                               
}

But the code above only saves the raw image coming from the camera. If we want the processed effect our display shows, we have to grab the display's pixels into a PImage and then save that PImage:



void saveImage(String file) {
  displayCopy = get();             // grab the current display content
  try {
    directory = Environment.getExternalStorageDirectory().getAbsolutePath();
    displayCopy.save(directory + "/" + file);
  } catch (Exception e) {
    println("Could not save image: " + e);
  }
}

The function saveImage() saves the current display image to the SD card of the mobile phone. With a bit of Android SDK coding it would also be possible to register the image with Android's MediaStore and thus make it available to other apps.
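
A rough sketch of that idea, assuming the classic (now deprecated) MediaStore.Images.Media.insertImage() API and Android Mode's getActivity() accessor; treat both as assumptions to verify against the Android documentation:

import android.provider.MediaStore;

// Hypothetical helper: register an already saved file with the MediaStore
// so gallery apps can find it. On newer Android versions a ContentResolver
// insert into MediaStore.Images.Media.EXTERNAL_CONTENT_URI is used instead.
void addToGallery(String path) {
  try {
    MediaStore.Images.Media.insertImage(getActivity().getContentResolver(),
                                        path, "cam effect", "Processing sketch output");
  } catch (Exception e) {
    println("Could not register image: " + e);
  }
}

The complete sketch, combining the effect with saving, then looks like this: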



import ketai.camera.*;
import android.os.Environment;

KetaiCamera cam;

float resolutionX = 30;                        // resolution of the effect
float resolutionY = 30;

String directory = "";
String filename = "image.jpg";                 // filename for our saved image

PImage displayCopy;                            // PImage to store the pixels of the display image
                                               // for saving

void setup() {
  orientation(LANDSCAPE);                      // force phone in landscape mode
  imageMode(CENTER);
  cam = new KetaiCamera(this, 1280, 720, 24);  // set cam
}

void draw() {
  if(cam.isStarted()){
    image(cam, width/2, height/2);                 // draw cam image
    for(int y = 0; y < height; y += resolutionY){  // loop over the display grid
      for(int x = 0; x < width; x += resolutionX){ // and process a tiny effect
        color c = cam.get(x, y);
        fill(c);
        noStroke();
        ellipse(x, y, resolutionX, resolutionY);
      }
    }
  } else {
    cam.start();
  }
}

// function to save the display image to the SD card
void saveImage(String file) {
  displayCopy = get();
  try {
    directory = Environment.getExternalStorageDirectory().getAbsolutePath();
    displayCopy.save(directory + "/" + file);
  } catch (Exception e) {
    println("Could not save image: " + e);
  }
}

// save image when tapping the screen
void mousePressed(){
  saveImage(filename);
  //cam.savePhoto();                // Ketai's function to save a cam image to Android's media folder
}

// read cam image if available
void onCameraPreviewEvent(){
  cam.read();                        
}

void onSavePhotoEvent(String filename){
  cam.addToMediaLibrary(filename);                               
}

All together: saving gesture drawings made on your mobile display

Exercise (15 minutes)
Enhance your drawing sketch with a function that saves your artworks as an image file on your phone.



8. Android & Virtual Reality: Processing for Google Cardboard

Prepare Processing for Cardboard


Writing Sketches for Cardboard

First, Processing's Cardboard library has to be imported (import processing.cardboard.*;), and the function fullScreen() gets PCardboard.STEREO as a parameter. All visual output of our sketch will then be rendered fullscreen in stereoscopic Cardboard mode.

Example: Prepare a sketch for Cardboard


import processing.cardboard.*;

void setup() {
  fullScreen(PCardboard.STEREO);
}

void draw() {
}

Now everything we draw in our sketch will automatically be rendered for viewing with a Cardboard-compatible headset.

"There is nothing special to add 3D objects to the scene, you simply use the same functions for drawing 3D primitives and shapes with the P3D renderer."

Of course, the computing power of a smartphone doesn't match that of a laptop, so we should keep in mind the question of whether our phone can render the high-end 3D open-world game we came up with in real time and without glitches.
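
A quick way to keep an eye on this is Processing's built-in frameRate variable, for example logged once per second while a test scene is running:

void setup() {
  fullScreen(P3D);
}

void draw() {
  background(0);
  lights();
  translate(width/2, height/2);
  rotateY(frameCount * 0.01);
  box(height * 0.3);
  if (frameCount % 60 == 0) {
    println("fps: " + nf(frameRate, 0, 1));   // should stay close to 60
  }
}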

Settings:



import processing.cardboard.*;

PShape grid;
PShape cubes;

void setup() {
  fullScreen(PCardboard.STEREO);
  
  grid = createShape();
  grid.beginShape(LINES);
  grid.stroke(255);
  for (int x = -10000; x < +10000; x += 250) {
    grid.vertex(x, -1000, +10000);
    grid.vertex(x, -1000, -10000);
  }
  for (int z = -10000; z < +10000; z += 250) {
    grid.vertex(+10000, -1000, z);
    grid.vertex(-10000, -1000, z);      
  }  
  grid.endShape();  
  
  cubes = createShape(GROUP);
  for (int i = 0; i < 100; i++) {
    float x = random(-1000, +1000); 
    float y = random(-1000, +1000);
    float z = random(-1000, +1000);
    
    float r = random(50, 150);
    PShape cube = createShape(BOX, r, r, r);
    cube.setStroke(false);
    cube.setFill(color(180));
    cube.translate(x, y, z);
    cubes.addChild(cube);
  }
}

void draw() {
  background(0);
  ambientLight(150, 150, 150);
  pointLight(255, 255, 255, 0, 0, 0);
  translate(width/2 - 1000, height/2, 500);
  shape(cubes);
  shape(grid);
}



import processing.cardboard.*;

int amount = 10;                         // we'll use 10 images (stored in the data folder)
PImage[] images = new PImage[amount];    // declare an array for the Images
float radius = 1200;                    
float angleX = 0;
float angleY = 0;
float angleZ = 0;


void setup(){
  //fullScreen(P3D);
  //orientation(LANDSCAPE);
  fullScreen(PCardboard.STEREO);
  for (int i = 0; i < images.length; i++){// loop through our array  
    images[i] = loadImage(i+".jpeg");     // Load the images into our array (filenames are 0.jpeg, 1.jpeg,…)
  }
}

void draw(){
  background(10,10,30);                  //erase everything with deep blue
  drawPics();                            //call our function
}


void drawPics(){
  float angle = 0;
  for (int i = 0; i < images.length; i++){
     float stepSize = TWO_PI / images.length;      // set stepsize for rotation
     angle = i*stepSize;
     float px = 0 + (radius * cos(angle));         // calculate the spherical x,y,z-position for every image
     float py = 0 ;
     float pz = 0 + (radius * sin(angle));
    
     pushMatrix();                                  // save matrix to stack
     rotateZ(PI);                                   // flip image
     translate(px, py, pz);                         // move drawing location to the calculated x,y,z-position
     rotateY(-angle+(PI/1.75));                     // rotate image accordingly in space
     image(images[i], 0, 0);                        // now finally draw the image
     popMatrix();                                   // restore old matrix
  }
}




//import processing.cardboard.*;

import peasy.*;
PeasyCam camera;

int len = 400;                          // length of trail

float[] angleA = new float[len];        // Array for angle 1 of spherical points 
float[] angleB = new float[len];        // Array for angle 2 of spherical points 
color[] hues = new color[len];

float radius = 1000;                    // Distance of the trail
float lastX, lastY, lastZ;              // to store last position

float noiseXoffset = 0;                 // starting position for perlin noise X
float noiseYoffset = 4;                 // starting position for perlin noise Y

int centerX, centerY, centerZ;
float hueOffset = 0;

void setup(){
   //fullScreen(PCardboard.STEREO);
   size(800,600, P3D);
   frameRate(30);
   camera = new PeasyCam(this, 0, 0, 0, 4000);
   centerX = 0;
   centerY = 0;
   centerZ = 0;
   colorMode(HSB);
   /*for(int i = 1; i < angleA.length; i++){                  // loop through Array
     noiseXoffset += random(0.1);                           // vary step size for noise
     noiseYoffset += random(0.1);
     angleA[i] = map(noise(noiseXoffset), 0, 1, 0, PI);      // calculate new angle 1 for point
     angleB[i] = map(noise(noiseYoffset), 0, 1, 0, TWO_PI);  // calculate new angle 2 for point
     hues[i] = color(i, 100, 255);                           // set hue
   }*/
   stroke(0,100);
}


void draw(){
  background(hueOffset,100,100);
  noiseXoffset += random(0.06);
  noiseYoffset += random(0.02);
  if (hueOffset < 255) hueOffset += 1; else hueOffset = 0;   // wrap hue within HSB's default 0–255 range
  
  // Shift Arrays
  for(int i = 1; i < angleA.length; i++){                          // loop through Array(s) (starting at 1)
    angleA[i-1] = angleA[i];                                       // shift actual i to i-1
    angleB[i-1] = angleB[i];
    hues[i-1] = hues[i];
  }
  
  // Add actual Position at the end of array
  angleA[angleA.length-1] = noise(noiseXoffset) * TWO_PI * 2;      // take next values from Perlin Noise and use
  angleB[angleB.length-1] = (noise(noiseYoffset)-0.5) * TWO_PI * 2;// it as a radian value for our angles
  hues[hues.length-1]  = color(hueOffset,100,255);                 // add new hue
  
  // Draw Trail
  noFill();
  beginShape();                                                    // begin a Shape made out of vertices
  for(int i = 1; i < angleA.length; i++){                          // loop through Array(s)
   float x = centerX + radius * sin(angleA[i]) * cos(angleB[i]);
   float y = centerY + radius * sin(angleA[i]) * sin(angleB[i]); 
   float z = centerZ+ radius * cos(angleA[i]); 
   //hues[i] = color(i, 255, 255);
   stroke(hues[i], i*2);                                           // set hue from array
   strokeWeight(i/(len/100));                                      // set width of stroke (the older the thinner)
   vertex(x, y, z);                                                // draw next point
  }
  endShape();
}


Still to do: bringing it all together!

Unfortunately, Android Mode and the necessary libraries aren't working together very well at the moment, so some Java coding would be needed to achieve these goals, which is beyond the scope of this course.