Verändern der Lautstärke in Abhängigkeit der Blickrichtung
(EN google-translate)
(PL google-translate)

Bild 0-1: Screenshot from program explained below.
In einer weiteren Erweiterung des bisherigen Programms soll nun eine kleine VR-Umgebung mit Sound erstellt werden:
An der Stelle q=[0,0,-100] wird eine rote Kugel positioniert. Die Kugel soll eine Punktschallquelle sein. Von dieser Kugel aus soll der Klang der Audiodatei kommen. Hierzu wird berechnet, wie weit die Hörrichtung jedes Ohres mit der Richtung zur Schallquelle übereinstimmt und so die Lautstärke für das linke und rechte Ohr getrennt bestimmt. Der entsprechende neue Programmabschnitt wird im Quelltext mit //VR BEGIN //VR END markiert.
ACHTUNG: Damit dieses Programm funktioniert, muss von Android->App auf Android->VR umgestellt werden.
Während die vorangehenden Programme auch mit älteren Android Geräten funktionieren, funktioniert dieses nur richtig mit solchen ab Android 6 mit Gyroskop.
import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import android.media.AudioTrack; import android.media.AudioFormat; import android.media.AudioManager; import android.media.AudioRecord; import android.media.MediaRecorder; //VR: import processing.vr.*; PShape spheres; PMatrix3D eyeMat = new PMatrix3D(); //Object to put in the actual eye-matrix float[] q = {0,0,-100}; //sphere position Sound sound; float[] mywavtone; public void setup() { mywavtone = ladeWavMix("ton69.wav"); sound = new Sound(); fullScreen(STEREO); //with glasses //fullScreen(MONO); //without glasses orientation(LANDSCAPE); //VR: spheres = createShape(GROUP); PShape sphere = createShape(SPHERE, 50); sphere.setStroke(false); sphere.setFill(color(255,0,0)); //sphere.translate(0,0,-100); sphere.translate(q[0],q[1],q[2]); spheres.addChild(sphere); } public void draw() { background(0,0,255); getEyeMatrix(eyeMat); //actualize eye matrix translate(eyeMat.m03, eyeMat.m13, eyeMat.m23); //put view point to [0 0 0] == put world to eye-position lights(); //turn on lights shape(spheres); //render group of shapes (here only one sphere) } public class Sound implements Runnable { private AudioTrack audioTrack; //Connection to sound card private int sr=44100; //Sample rate private int sr_wav=44100; //Sample rate of the loaded tone. So this is handled seperately. //Thus, you do not care about if changing sampling rate sr. It may differ to one of sound files! private int buffsize=512; //buffer size private int buffsize2=buffsize*2; private ScheduledExecutorService schedExecService; //realtime process private short[] shortbuffer = new short[buffsize*2]; //stereo buffer l0 r0 l1 r1 l2 r2 ... frequently sent to sound card private double t=0.0; //realtime private double dt = 1.0/(double)sr; //time step according to sampling rate. private double dt_wav = 1.0/(double)sr_wav; //time step according to sampling rate of loaded file. 
private float MAX_SHORT = 32767.0; public Sound() { try { audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sr, AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT, buffsize*10, AudioTrack.MODE_STREAM); audioTrack.setStereoVolume(1.0f, 1.0f); audioTrack.play(); } catch(Exception eee) { System.out.println("FEHLER: "+eee); } schedExecService = Executors.newSingleThreadScheduledExecutor(); long period = (buffsize*1000)/sr; //Seconds per Beat==60/BPM, die Hälfte weil 8tel, mal 1000 weil Millisekunden. schedExecService.scheduleAtFixedRate(this, 0, period, TimeUnit.MILLISECONDS); } public void run() { for(int i=0;i<buffsize2;i+=2) { //ACCURACY OF TIME SHOULD BE HIGHER THAN ONE OF AMPLITUDE. //SO TIME IS CALCULATED USING double AND AMPLITUDE USING float. //This may be the part of the programm where you might want to modify something like //introducing other sound sources / directional sound and so on: //Calculate the actual sample from the provided wav-file: //Actual time may be in between two samples: double T_entire = dt_wav*(double)mywavtone.length; //duration of file double t_file = t - T_entire*Math.floor(t/T_entire); //actual time in looped wav file int index_lo = (int)(t_file/(double)dt_wav); //first sample index int index_hi = (index_lo+1)%mywavtone.length; //following sample index //calculating weight for each sample: double t_part = t_file - dt_wav*Math.floor(t_file/dt_wav); double part = t_part/dt_wav; //range [0,1] float weight_lo = 1.0 - (float)part; float weight_hi = (float)part; float amplitude = weight_lo*mywavtone[index_lo] + weight_hi*mywavtone[index_hi]; //VR begin //From this point on amplitude has to be modified for the left ear and the right ear. 
getEyeMatrix(eyeMat); //actualize eye matrix for sounding //make up ez (direction of view) float[] ez = {eyeMat.m02,-eyeMat.m12,eyeMat.m22}; float[] ex = {eyeMat.m00,-eyeMat.m10,eyeMat.m20}; //calculate direction for left ear (+PI/4), right (-PI/4): float[] ear_left = {ez[0]-ex[0],ez[1]-ex[1],ez[2]-ex[2]}; float[] ear_right = {ez[0]+ex[0],ez[1]+ex[1],ez[2]+ex[2]}; float length_ear_left = getVectorLength(ear_left); float length_ear_right = getVectorLength(ear_right); ear_left[0]/=length_ear_left; ear_left[1]/=length_ear_left; ear_left[2]/=length_ear_left; ear_right[0]/=length_ear_right; ear_right[1]/=length_ear_right; ear_right[2]/=length_ear_right; //Normalize Position vector of sphere: float length_q = getVectorLength(q); float[] qn = {q[0]/length_q,q[1]/length_q,q[2]/length_q}; //Calculate different volume for left and right ear: float volume_left = ear_left[0]*qn[0] + ear_left[1]*qn[1] + ear_left[2]*qn[2]; float volume_right = ear_right[0]*qn[0] + ear_right[1]*qn[1] + ear_right[2]*qn[2]; //cut off negative values: if(volume_left<0.0) volume_left=0.0; if(volume_right<0.0) volume_right=0.0; //VR end float left = MAX_SHORT*amplitude*volume_left; float right = MAX_SHORT*amplitude*volume_right; shortbuffer[i]=(short)left; shortbuffer[i+1]=(short)right; t+=dt; //increment of realtime for each sample } audioTrack.write(shortbuffer, 0,buffsize2); } } /** * Loads a Stereo-Wav-Datei. It is mixed to one channel and provided as float-Array. 
*/ public float[] ladeWavMix(String name) //Gleich nur Mix aus linkem und rechtem Kanal laden (aus 44100 2x16Bit extrahieren) { int zl,zh,lo,gesamt; float gross = 0.5f*(255.0f*256.0f + 255.0f); byte[] dat = loadBytes(name); int anz = dat.length; float[] y = new float[(anz-44)/4]; int inx=44; for(int i=0;i<y.length;i++) { zl = dat[inx++]; zh = dat[inx++]; if(zl>127) zl-=256; if(zh>127) zh-=256; lo = zl; if(lo<0) lo+=256; gesamt = (zh+128)*256; gesamt+=lo; y[i] = 0.5f*((float)gesamt - gross)/gross; zl = dat[inx++]; zh = dat[inx++]; if(zl>127) zl-=256; if(zh>127) zh-=256; lo = zl; if(lo<0) lo+=256; gesamt = (zh+128)*256; gesamt+=lo; y[i] += 0.5f*((float)gesamt - gross)/gross; } return y; } public float getVectorLength(float[] vec) { return sqrt( vec[0]*vec[0]+vec[1]*vec[1]+vec[2]*vec[2] ); }
Code 0-1: Androidsound003
