Skip to content Skip to sidebar Skip to footer

How To Navigate A Google Glass Gdk Immersion Application Using Voice Command Only?

How would I go about coding a voice trigger to navigate Google Glass Cards? This is how I see it happening: 1) 'Ok Glass, Start My Program' 2) Application begins and shows the first card.

Solution 1:

Define the following in your onCreate method:

// Grab the audio manager (optionally you could mute the voice-call stream
// so the recognizer's beep does not interfere — left commented out here).
mAudioManager = (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
// mAudioManager.setStreamSolo(AudioManager.STREAM_VOICE_CALL, true);

// Create the speech recognizer and attach the listener class defined below.
sr = SpeechRecognizer.createSpeechRecognizer(context);
sr.setRecognitionListener(new listener(context));

// Build the recognition intent: free-form language model, calling package
// identifies this app to the recognition service.
// intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, "en-US");
intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
intent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE, context.getPackageName());

// Start listening immediately; the listener restarts recognition to keep
// the recognizer running continuously.
sr.startListening(intent);
Log.i("111111", "11111111" + "in");

Simply add this listener class inside your class:

classlistenerimplementsRecognitionListener          
{
    Context context1;
    publiclistener(Context context)
    {
        //Log.i("onError startListening","enter"+"nam");
        context1=context;
    }
    publicvoidonReadyForSpeech(Bundle params)
    {
        //Log.d(TAG, "onReadyForSpeech");
    }
    publicvoidonBeginningOfSpeech()
    {
        //Log.d(TAG, "onBeginningOfSpeech");
    }
    publicvoidonRmsChanged(float rmsdB)
    {
        //Log.d(TAG, "onRmsChanged");
    }
    publicvoidonBufferReceived(byte[] buffer)
    {
        //Log.d(TAG, "onBufferReceived");
    }
    publicvoidonEndOfSpeech()
    {
        //Log.d(TAG, "onEndofSpeech");
        sr.startListening(intent);
    }
    publicvoidonError(int error)
    {
        //Log.d(TAG,  "error " +  error);//7 -No recognition result matched.//9 - vInsufficient permissions //6 - No speech input //8 RecognitionService busy. //5 Other client side errors. //3 Audio recording error.  //  mText.setText("error " + error);if(error==6 || error==7 || error==4  || error==1 || error==2 || error==5 || error==3 || error==8 || error==9 )
        { 
            sr.startListening(intent);
            //Log.i("onError startListening","onError startListening"+error);
        }
    }
    publicvoidonResults(Bundle results)                   
    {
        //Log.v(TAG,"onResults" + results);ArrayList data = results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
        for (int i = 0; i < data.size(); i++)
        {
            //Log.d(TAG, "result " + data.get(i));//str += data.get(i);//Toast.makeText(context1, "results: "+data.get(0).toString(), Toast.LENGTH_LONG).show();//Log.v("my", "output"+"results: "+data.get(0).toString());//sr.startListening(intent);
                   }
    }
    publicvoidonPartialResults(Bundle partialResults)
    {
        //Log.d(TAG, "onPartialResults");
    }
    publicvoidonEvent(int eventType, Bundle params)
    {
        //Log.d(TAG, "onEvent " + eventType);
    }
}

Solution 2:

I'm writing out the entire code in detail since it took me such a long time to get this working.. perhaps it'll save someone else valuable time.

This code is the implementation of Google Contextual Voice Commands as described on Google Developers here: Contextual voice commands

ContextualMenuActivity.java

package com.drace.contextualvoicecommands;

    import android.app.Activity;
    import android.os.Bundle;
    import android.view.Menu;
    import android.view.MenuItem;
    import com.drace.contextualvoicecommands.R;
    import com.google.android.glass.view.WindowUtils;

    publicclassContextualMenuActivityextendsActivity {

    @OverrideprotectedvoidonCreate(Bundle bundle) {
        super.onCreate(bundle);

        // Requests a voice menu on this activity. As for any other// window feature, be sure to request this before// setContentView() is called
        getWindow().requestFeature(WindowUtils.FEATURE_VOICE_COMMANDS);
        setContentView(R.layout.activity_main);
    }

    @OverridepublicbooleanonCreatePanelMenu(int featureId, Menu menu) {
        if (featureId == WindowUtils.FEATURE_VOICE_COMMANDS) {
            getMenuInflater().inflate(R.menu.main, menu);
            returntrue;
        }
        // Pass through to super to setup touch menu.returnsuper.onCreatePanelMenu(featureId, menu);
    }

    @OverridepublicbooleanonCreateOptionsMenu(Menu menu) {
        getMenuInflater().inflate(R.menu.main, menu);
        returntrue;
    }

    @OverridepublicbooleanonMenuItemSelected(int featureId, MenuItem item) {
        if (featureId == WindowUtils.FEATURE_VOICE_COMMANDS) {
            switch (item.getItemId()) {
                case R.id.dogs_menu_item:
                    // handle top-level dogs menu itembreak;
                case R.id.cats_menu_item:
                    // handle top-level cats menu itembreak;
                case R.id.lab_menu_item:
                    // handle second-level labrador menu itembreak;
                case R.id.golden_menu_item:
                    // handle second-level golden menu itembreak;
                case R.id.calico_menu_item:
                    // handle second-level calico menu itembreak;
                case R.id.cheshire_menu_item:
                    // handle second-level cheshire menu itembreak;
                default:
                    returntrue;
            }
            returntrue;
        }
        // Good practice to pass through to super if not handledreturnsuper.onMenuItemSelected(featureId, item);
    }
    }

activity_main.xml (layout)

<?xml version="1.0" encoding="utf-8"?><RelativeLayoutxmlns:android="http://schemas.android.com/apk/res/android"xmlns:tools="http://schemas.android.com/tools"android:layout_width="match_parent"android:layout_height="match_parent" ><TextViewandroid:id="@+id/coming_soon"android:layout_alignParentTop="true"android:layout_width="wrap_content"android:layout_height="wrap_content"android:text="@string/voice_command_test"android:textSize="22sp"android:layout_marginRight="40px"android:layout_marginTop="30px"android:layout_marginLeft="210px" /></RelativeLayout>

strings.xml

<resources><stringname="app_name">Contextual voice commands</string><stringname="voice_start_command">Voice commands</string><stringname="voice_command_test">Say "Okay, Glass"</string><stringname="show_me_dogs">Dogs</string><stringname="labrador">labrador</string><stringname="golden">golden</string><stringname="show_me_cats">Cats</string><stringname="cheshire">cheshire</string><stringname="calico">calico</string></resources>

AndroidManifest.xml

<manifestxmlns:android="http://schemas.android.com/apk/res/android"package="com.drace.contextualvoicecommands"android:versionCode="1"android:versionName="1.0" ><uses-sdkandroid:minSdkVersion="19"android:targetSdkVersion="19" /><uses-permissionandroid:name="com.google.android.glass.permission.DEVELOPMENT"/><applicationandroid:allowBackup="true"android:icon="@drawable/ic_launcher"android:label="@string/app_name" ><activityandroid:name="com.drace.contextualvoicecommands.ContextualMenuActivity"android:label="@string/app_name" ><intent-filter><actionandroid:name="com.google.android.glass.action.VOICE_TRIGGER" /></intent-filter><meta-dataandroid:name="com.google.android.glass.VoiceTrigger"android:resource="@xml/voice_trigger_start" /></activity></application></manifest>

It's been Tested and works great under Google Glass XE22 !

Solution 3:

Solution 4:

You may want to try the contextual voice commands available in the GDK. While it does temporarily cover the screen with a menu, it allows voice-only input.

https://developers.google.com/glass/develop/gdk/voice

Solution 5:

I did something very similar for one of my applications. It doesn't require the "ok glass" screen at all, but the user does need to know the commands ahead of time. I explained a bit of it and provided links in my answer here: Glass GDK: Contextual voice commands without the "Ok Glass"

I hope this helps!

Post a Comment for "How To Navigate A Google Glass Gdk Immersion Application Using Voice Command Only?"