kramann.info
© Guido Kramann

Login: Passwort:










3.11.5.6 Adding some animation
3.11.5.6 Adding some animation (EN google-translate)
3.11.5.6 Dodawanie animacji (PL google-translate)

Es werden Bildfolgen aus einem Film extrahiert. Die Bilder werden auf den Oberflächen der sechs Kugeln animiert.

Image sequences are extracted from a movie. The pictures are animated on the surfaces of the six balls.

Sekwencje obrazów są wyodrębniane z filmu. Zdjęcia są animowane na powierzchniach sześciu kul.

Screenshot.

Bild 3.11.5.6-1: Screenshot.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import android.media.AudioTrack;
import android.media.AudioFormat;
import android.media.AudioManager;

import android.media.AudioRecord;
import android.media.MediaRecorder;

//VR:
import processing.vr.*;
PShape spheres; //group shape holding the six textured spheres (built in setup())
PMatrix3D eyeMat = new PMatrix3D(); //Object to put in the actual eye-matrix
float[][] q = {
                {0,0,-100},
                {0,0,100},
                {0,-100,0},
                {0,100,0},
                {-100,0,0},
                {100,0,0}
              }; //sphere positions: one sphere on each coordinate half-axis, 100 units from the origin
              
float[][] qcolor = {
                {255,0,0},
                {0,255,0},
                {255,255,0},
                {0,0,255},
                {0,255,255},
                {255,0,255}
              }; //sphere RGB colors — NOTE(review): not referenced anywhere in the visible code
              
float[] ear_left  = new float[3]; //unit direction vector of the left ear (updated from head tracking)
float[] ear_right = new float[3]; //unit direction vector of the right ear (updated from head tracking)
float[] left_right = new float[2]; //scratch buffer: per-source volume for the left/right channel


Sound sound; //realtime audio renderer (class Sound below)
float[][] mywavtone; //one loopable mono tone per sphere, loaded in setup()

//ANIMATION:
PImage[] pics = new PImage[30]; //animation frames extracted from the movie: b1.png .. b30.png

public void setup()
{
    // Load the animation frames b1.png .. b30.png.
    for (int frame = 0; frame < pics.length; frame++)
    {
        pics[frame] = loadImage("b" + (frame + 1) + ".png");
    }

    // One loopable mono tone per sphere: bachneu1.wav .. bachneu6.wav.
    mywavtone = new float[q.length][];
    for (int src = 0; src < mywavtone.length; src++)
    {
        mywavtone[src] = ladeWavMixLoop("bachneu" + (src + 1) + ".wav");
    }
    sound = new Sound(); // starts the realtime audio thread

    fullScreen(STEREO); //with glasses
    //fullScreen(MONO); //without glasses
    orientation(LANDSCAPE);

    //VR: build one group shape with a textured sphere at each position in q.
    spheres = createShape(GROUP);
    for (int src = 0; src < q.length; src++)
    {
        PShape ball = createShape(SPHERE, 50);
        ball.setStroke(false);
        ball.setTexture(pics[src]);
        ball.translate(q[src][0], q[src][1], q[src][2]);
        spheres.addChild(ball);
    }
}

int PIC_INDEX=0; //current base frame of the texture animation
public void draw()
{
    // Advance the animation: each sphere shows a frame offset by its own
    // index, all cycling through the loaded pictures.
    for (int src = 0; src < q.length; src++)
    {
        spheres.getChild(src).setTexture(pics[(src + 1 + PIC_INDEX) % pics.length]);
    }
    PIC_INDEX = (PIC_INDEX + 1) % pics.length;

    background(0, 0, 255);
    getEyeMatrix(eyeMat);                           //refresh the head-tracking matrix
    translate(eyeMat.m03, eyeMat.m13, eyeMat.m23);  //keep the world centered on the viewer
    lights();                                       //turn on lights
    shape(spheres);                                 //render the group of six spheres
}

/**
 * Realtime audio renderer: periodically fills a streaming AudioTrack with
 * the sum of all sphere sound sources, each weighted per ear according to
 * the current head orientation (directional sound).
 */
public class Sound implements Runnable
{
    private AudioTrack audioTrack; //Connection to sound card
    private int sr=44100; //Playback sample rate
    private int sr_wav=44100; //Sample rate of the loaded tone, handled separately.
    //Thus you do not need to care when changing the playback rate sr — it may
    //differ from the rate of the sound files.
    private int buffsize=512; //number of stereo frames rendered per run()
    private int buffsize2=buffsize*2; //number of shorts per run() (2 interleaved channels)
    private ScheduledExecutorService schedExecService; //drives run() periodically (realtime process)
    private short[] shortbuffer = new short[buffsize*2]; //stereo buffer l0 r0 l1 r1 l2 r2 ... frequently sent to sound card

    private double t=0.0; //realtime position in seconds
    private double dt = 1.0/(double)sr; //time step per output sample
    private double dt_wav = 1.0/(double)sr_wav; //time step per sample of the loaded files
    private float MAX_SHORT = 32767.0f; //scale factor: float [-1,1] -> 16-bit PCM

    /**
     * Opens a streaming 16-bit stereo AudioTrack and schedules this task to
     * refill it at (roughly) the buffer period.
     */
    public Sound()
    {
        try
        {
            audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sr,
                                    AudioFormat.CHANNEL_OUT_STEREO,
                                    AudioFormat.ENCODING_PCM_16BIT,
                                    buffsize*10,
                                    AudioTrack.MODE_STREAM);
            audioTrack.setStereoVolume(1.0f, 1.0f);

            audioTrack.play();
        }
        catch(Exception eee)
        {
            //NOTE(review): if construction fails, audioTrack stays null and
            //run() will throw; kept as best-effort logging like the original.
            System.out.println("FEHLER: "+eee);
        }

        schedExecService = Executors.newSingleThreadScheduledExecutor();
        //Buffer duration in milliseconds. Integer division rounds 11.6 ms down
        //to 11 ms, so the producer runs slightly fast; the blocking write()
        //below keeps it in sync with playback.
        long period = (buffsize*1000)/sr;
        schedExecService.scheduleAtFixedRate(this, 0, period, TimeUnit.MILLISECONDS);
    }

    /**
     * Renders one buffer: for every sample, sums all sources with
     * direction-dependent per-ear volumes and writes the interleaved stereo
     * result to the AudioTrack.
     */
    public void run()
    {
        for(int i=0;i<buffsize2;i+=2)
        {
             //ACCURACY OF TIME SHOULD BE HIGHER THAN THE ONE OF AMPLITUDE,
             //SO TIME IS CALCULATED USING double AND AMPLITUDE USING float.

             float LEFT_SOUND=0.0f;
             float RIGHT_SOUND=0.0f;
             updateEarLeftEarRight(ear_left,ear_right); //current ear directions from head tracking
             for(int k=0;k<q.length;k++)
             {
                 //Interpolated sample of (looped) source k at time t:
                 float amplitude = getSampleValue(mywavtone[k],dt_wav,t);

                 //VR: direction-dependent volume of source k for each ear:
                 updateVolumeLeftRight(left_right, ear_left,ear_right,q[k]);
                 float volume_left  = left_right[0];
                 float volume_right = left_right[1];

                 LEFT_SOUND  += MAX_SHORT*amplitude*volume_left;
                 //BUGFIX: this used to accumulate the LEFT value, making both
                 //channels identical and discarding the directional panning:
                 RIGHT_SOUND += MAX_SHORT*amplitude*volume_right;
                 //NOTE(review): summing several sources can exceed the short
                 //range and wrap on the cast below — consider clamping.
             }
             shortbuffer[i]  =(short)LEFT_SOUND;
             shortbuffer[i+1]=(short)RIGHT_SOUND;
             t+=dt; //increment of realtime for each sample
        }

        audioTrack.write(shortbuffer, 0,buffsize2); //blocks until the track has space
    }
}

Code 3.11.5.6-1: Androidsound006

/**
 * Returns the linearly interpolated sample of a looped mono tone at time t.
 *
 * mywavtone == mono wav data (one float per sample)
 * dt_wav    == duration of one sample of the wav tone in seconds
 * t         == realtime in seconds; values beyond the file length wrap (loop)
 */
public float getSampleValue(float[] mywavtone, double dt_wav, double t)
{
             //Wrap t into the duration of the looped file:
             double T_entire = dt_wav*(double)mywavtone.length; //duration of file
             double t_file = t - T_entire*Math.floor(t/T_entire);

             //The wrapped time falls between two neighbouring samples:
             int index_lo = (int)(t_file/(double)dt_wav);
             int index_hi = (index_lo+1)%mywavtone.length; //wraps at the file end

             //Fractional position between the two samples, range [0,1]:
             double t_part = t_file - dt_wav*Math.floor(t_file/dt_wav);
             float w = (float)(t_part/dt_wav);

             //Blend the two samples with complementary weights:
             return (1.0f - w)*mywavtone[index_lo] + w*mywavtone[index_hi];
}

/**
 * Computes the volume of one sound source for each ear by projecting the
 * normalized source direction onto the (unit) ear direction vectors.
 * Negative projections (source behind the ear) are clamped to silence.
 *
 * left_right == output: {volume_left, volume_right}
 * q          == position of the source (shadows the global q on purpose)
 */
public void updateVolumeLeftRight(float[] left_right, float[] ear_left,float[] ear_right,float[] q)
{
             //Length of the source position vector, used for normalization:
             float lq = getVectorLength(q);

             //Dot products of each ear direction with the normalized source direction:
             float vl = ear_left[0]*q[0]/lq + ear_left[1]*q[1]/lq + ear_left[2]*q[2]/lq;
             float vr = ear_right[0]*q[0]/lq + ear_right[1]*q[1]/lq + ear_right[2]*q[2]/lq;

             //Cut off negative values and store the result:
             left_right[0] = Math.max(0.0f, vl);
             left_right[1] = Math.max(0.0f, vr);
}

/**
 * Recomputes the unit direction vectors of both ears from the current eye
 * (head) matrix: each ear direction is the view direction tilted sideways
 * by -/+ 0.5 times the head's right axis, then normalized.
 */
public void updateEarLeftEarRight(float[] ear_left,float[] ear_right)
{
             getEyeMatrix(eyeMat); //refresh the eye matrix so the ears follow the head
             //make up ez (direction of view) and ex (right direction of the head)
             //NOTE(review): the y components are negated — presumably to adapt the
             //eye matrix' y-axis orientation to the sound model; confirm against processing.vr.
             float[] ez = {eyeMat.m02,-eyeMat.m12,eyeMat.m22};
             float[] ex = {eyeMat.m00,-eyeMat.m10,eyeMat.m20};
             //Tilt the view direction sideways for each ear. (The original comment
             //mentioned +/-PI/8; the code actually blends in 0.5*ex, i.e. an angle
             //of atan(0.5) ≈ 26.6°, smaller than in version 003.)
             ear_left[0] = ez[0]-0.5*ex[0];
             ear_left[1] = ez[1]-0.5*ex[1];
             ear_left[2] = ez[2]-0.5*ex[2];
             
             ear_right[0] = ez[0]+0.5*ex[0];
             ear_right[1] = ez[1]+0.5*ex[1];
             ear_right[2] = ez[2]+0.5*ex[2];
             
             //Normalize both ear vectors to unit length:
             float length_ear_left  = getVectorLength(ear_left);
             float length_ear_right = getVectorLength(ear_right);
             
             ear_left[0]/=length_ear_left;
             ear_left[1]/=length_ear_left;
             ear_left[2]/=length_ear_left;
             
             ear_right[0]/=length_ear_right;
             ear_right[1]/=length_ear_right;
             ear_right[2]/=length_ear_right;   
}

/**
 * Returns the Euclidean length (2-norm) of a 3-component vector.
 */
public float getVectorLength(float[] vec)
{
    float sumOfSquares = vec[0]*vec[0] + vec[1]*vec[1] + vec[2]*vec[2];
    return sqrt(sumOfSquares);
}
    /**
     * Loads a 16-bit stereo wav file and mixes both channels into one mono
     * float array. Each channel is scaled to [-0.5, 0.5], so their sum lies
     * in [-1, 1].
     *
     * name == asset file name; assumes the canonical 44-byte wav header and
     *         little-endian 16-bit stereo PCM data.
     *         NOTE(review): the header is skipped, not parsed or validated.
     */
    public float[] ladeWavMix(String name)
    {
            float gross = 0.5f*(255.0f*256.0f + 255.0f); //32767.5 == half the unsigned 16-bit range
            byte[] dat = loadBytes(name);
            float[] y = new float[(dat.length-44)/4]; //4 bytes per stereo frame (2 x 16 bit)

            int inx=44; //skip the wav header

            for(int i=0;i<y.length;i++)
            {
                 //--- left channel (little endian: low byte first) ---
                 //(The original re-checked ">127" on the bytes; a Java byte is
                 //always sign-extended to [-128,127], so those branches were dead.)
                 int lo = dat[inx++] & 0xFF; //unsigned low byte
                 int zh = dat[inx++];        //sign-extended high byte
                 //Shift the signed 16-bit value into the unsigned range 0..65535:
                 int gesamt = (zh+128)*256 + lo;
                 y[i] = 0.5f*((float)gesamt - gross)/gross;

                 //--- right channel, mixed on top ---
                 lo = dat[inx++] & 0xFF;
                 zh = dat[inx++];
                 gesamt = (zh+128)*256 + lo;
                 y[i] += 0.5f*((float)gesamt - gross)/gross;
            }
            return y;
    }
    
    /**
     * Loads a stereo file, mixes it to mono and rearranges it into a loopable
     * version: both the start and the end of the file are faded down to zero
     * towards the middle, then the two halves are cross-summed so the result
     * repeats seamlessly (the two half-weights add up to exactly 1).
     */
    public float[] ladeWavMixLoop(String name)
    {
          float[] AB = ladeWavMix(name);
          int half = AB.length/2;

          //Fade the start and the end of the buffer linearly down to zero.
          //BUGFIX: the original computed 1.0f - i/(half) - 1 == -i/half because
          //the trailing "-1" stood outside the denominator parentheses, giving
          //a negative, inverted ramp instead of a weight running from 1 to 0.
          for(int i=0;i<half;i++)
          {
              float weight = 1.0f - (float)i/(float)(half-1);
              AB[i] *= weight;
              AB[AB.length-1-i] *= weight;
          }

          //Cross-sum the two halves; afterwards both halves are identical, so
          //the buffer can be looped without a discontinuity at the seam.
          for(int i=0;i<half;i++)
          {
              int ii = half+i;
              float sum = AB[i] + AB[ii];
              AB[i]  = sum;
              AB[ii] = sum;
          }
          return AB;
    }

Code 3.11.5.6-2: helper tab of Androidsound006

Androidsound006.zip -- sketch folder.
To crop an 80×60 section, starting from position (200, 100):
ffmpeg -i bach.mp4 -filter:v "crop=80:60:200:100" -c:a copy out.mp4

ffmpeg -i bach.mp4 -filter:v "crop=256:256:150:250" -c:a copy out.mp4
avconv -i out.mp4 -f image2 export/b%00d.png 

avconv -i test.avi -f image2 export/b%00d.png 

//nochmal anders:

ffmpeg -i bach.mp4 -filter:v "crop=360:240:150:160" -c:a copy out6.mp4
avconv -i out6.mp4 -f image2 export/b%00d.png 

avconv -i bach5.avi -f image2 export/b%00d.png 

Code 3.11.5.6-3: Convert the video and extract pictures from it.