禁用某个应用内 activity 的权限
Disable permissions for a certain activity within an app
我正在创建一个简单的应用程序。我目前发现的是,当我打开我的应用程序时,已在 .gradle
文件夹中设置了以下权限:
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />
<uses-permission android:name="android.permission.INTERNET" />
<uses-permission android:name="android.permission.RECORD_AUDIO" />
但是我的应用程序有第二个 activity 称为 Gvoice.java,然后依赖于谷歌语音到文本。
我发现,如果我在 .gradle 文件夹中保留上述权限,我的主 activity window 与 pocketsphinx 完美配合。但是我的 Gvoice activity 给了我一条消息 'Can't open microphone'
.
但是我发现如果我去掉.gradle文件夹中的三个权限。我的主要 activity 不再执行我想要它执行的操作,但我的第二个 activity 称为 Gvoice 现在可以正确地将语音转换为文本并且不会给我 'Can't open microphone'
消息。
是否有解决此问题的简单方法,因为我认为它肯定与麦克风权限有关。另请注意,每当我 return 到主 activity 时,需要设置权限,当我在主 activity 之外的另一个 activity 时,它需要禁用许可(或者如果有另一种方法可以做到这一点)。
能否在manifest文件中为不同的activity设置不同的权限?
下面我已经更新了目前的内容,请看一下:
public class PocketSphinxActivity extends Activity implements RecognitionListener
{
private static final String KWS_SEARCH = "wakeup";
/* Keyword we are looking for to activate menu */
private static final String KEYPHRASE = "open voice command"; //adjust this keyphrase here and in string!
private SpeechRecognizer recognizer;
private HashMap<String, Integer> captions;
ListView lv;
TextView tv;
EditText a;
Button b;
Button c;
Boolean isDone = false;
/**
 * Builds the UI, wires up the views, and kicks off recognizer setup on a
 * background thread (asset syncing involves IO and is too slow for the UI
 * thread). Also installs a TextWatcher that auto-clicks the voice button
 * once the keyphrase has been dictated into the username field.
 */
@Override
public void onCreate(Bundle state) {
super.onCreate(state);
// Prepare the data for UI
captions = new HashMap<String, Integer>();
captions.put(KWS_SEARCH, R.string.kws_caption);
setContentView(R.layout.main);
((TextView) findViewById(R.id.caption_text))
.setText("Preparing the recognizer");
lv = (ListView) findViewById(R.id.lvVoiceReturn);
tv = (TextView) findViewById(R.id.result_text);
a = (EditText) findViewById(R.id.TFusername);
b = (Button) findViewById(R.id.bVoice);
c = (Button)findViewById(R.id.Blogin);
// Recognizer initialization is a time-consuming and it involves IO,
// so we execute it in async task
new AsyncTask<Void, Void, Exception>() {
@Override
protected Exception doInBackground(Void... params) {
try {
Assets assets = new Assets(PocketSphinxActivity.this);
File assetDir = assets.syncAssets();
setupRecognizer(assetDir);
} catch (IOException e) {
// Report the failure to onPostExecute instead of crashing here.
return e;
}
return null;
}
@Override
protected void onPostExecute(Exception result) {
if (result != null) {
((TextView) findViewById(R.id.caption_text))
.setText("Failed to init recognizer " + result);
} else {
// Setup succeeded: start passive keyword spotting.
switchSearch(KWS_SEARCH);
}
}
}.execute();
//line added.../////////////////////////
// Watch the username field: once the dictated text equals the keyphrase,
// simulate a click on the voice button after a short delay.
a.addTextChangedListener(new TextWatcher() {
@Override
public void beforeTextChanged(CharSequence s, int start, int count, int after) {
}
@Override
public void onTextChanged(CharSequence s, int start, int before, int count) {
if (s.toString().trim().equalsIgnoreCase("open voice command")) {
//
//Do your stuff here OR button.performClick()
//
//DELAY
Handler handler = new Handler();
handler.postDelayed(new Runnable() {
@Override
public void run() {
// isDone guards against firing the click more than once.
if (!isDone) {
b.performClick();
isDone = true;
}
}
}, 500);
}
}
@Override
public void afterTextChanged(Editable s) {
}
});
////////////////////////////////////////
}
/**
 * Tears down the recognizer when the activity is destroyed so the
 * microphone and native resources are released.
 */
@Override
public void onDestroy() {
    super.onDestroy();
    // The recognizer is created asynchronously in onCreate(); it is still
    // null if setup failed or has not finished yet. Guard against an NPE
    // (same pattern as the onPause() example recommended below).
    if (recognizer != null) {
        recognizer.cancel();
        recognizer.shutdown();
        recognizer = null;
    }
}
/**
 * Receives quick updates about the current hypothesis while recognition
 * is in progress and mirrors the partial text into the username field.
 * In keyword-spotting mode we can react here; other modes must wait for
 * the final result in onResult.
 */
@Override
public void onPartialResult(Hypothesis hypothesis) {
    if (hypothesis != null) {
        final String partialText = hypothesis.getHypstr();
        EditText usernameField = (EditText) findViewById(R.id.TFusername);
        usernameField.setText(partialText);
    }
}
/**
 * Called when the recognizer is stopped. Clears the username field and,
 * if a final hypothesis exists, shows it briefly in a toast.
 */
@Override
public void onResult(Hypothesis hypothesis) {
    EditText usernameField = (EditText) findViewById(R.id.TFusername);
    usernameField.setText("");
    if (hypothesis == null) {
        return;
    }
    String finalText = hypothesis.getHypstr();
    makeText(getApplicationContext(), finalText, Toast.LENGTH_SHORT).show();
}
@Override
public void onBeginningOfSpeech() {
// No-op: nothing needs to happen when speech input starts.
}
/**
 * Stops the recognizer here to obtain a final result: if a non-keyword
 * search just finished, switch back to passive keyword spotting.
 */
@Override
public void onEndOfSpeech() {
    String activeSearch = recognizer.getSearchName();
    if (!KWS_SEARCH.equals(activeSearch)) {
        switchSearch(KWS_SEARCH);
    }
}
/**
 * Restarts recognition with the given search and updates the caption.
 * Keyword spotting runs without a timeout; any other search stops
 * automatically after 10 seconds (10000 ms).
 *
 * @param searchName the recognizer search to activate
 */
private void switchSearch(String searchName) {
    recognizer.stop();
    boolean keywordSpotting = searchName.equals(KWS_SEARCH);
    if (keywordSpotting) {
        recognizer.startListening(searchName);
    } else {
        recognizer.startListening(searchName, 10000);
    }
    String caption = getResources().getString(captions.get(searchName));
    TextView captionView = (TextView) findViewById(R.id.caption_text);
    captionView.setText(caption);
}
/**
 * Builds the PocketSphinx recognizer from the synced asset directory and
 * registers this activity as its listener.
 *
 * @param assetsDir directory containing the acoustic model and dictionary
 * @throws IOException if the recognizer resources cannot be read
 */
private void setupRecognizer(File assetsDir) throws IOException {
// The recognizer can be configured to perform multiple searches
// of different kind and switch between them
recognizer = defaultSetup()
.setAcousticModel(new File(assetsDir, "en-us-ptm"))
.setDictionary(new File(assetsDir, "cmudict-en-us.dict"))
// To disable logging of raw audio comment out this call (takes a lot of space on the device)
.setRawLogDir(assetsDir)
// Threshold to tune for keyphrase to balance between false alarms and misses
.setKeywordThreshold(1e-45f)
// Use context-independent phonetic search, context-dependent is too slow for mobile
.setBoolean("-allphone_ci", true)
.getRecognizer();
recognizer.addListener(this);
/** In your application you might not need to add all those searches.
* They are added here for demonstration. You can leave just one.
*/
// Create keyword-activation search.
recognizer.addKeyphraseSearch(KWS_SEARCH, KEYPHRASE);
}
/** Surfaces recognizer errors to the user via the caption text view. */
@Override
public void onError(Exception error) {
    TextView captionView = (TextView) findViewById(R.id.caption_text);
    captionView.setText(error.getMessage());
}
@Override
public void onTimeout() {
// Listening timed out; fall back to passive keyword spotting.
switchSearch(KWS_SEARCH);
}
/**
 * Click handler for the voice button: opens the Gvoice activity when the
 * text typed into the EditText contains "command" (case-insensitive);
 * otherwise shows an error toast.
 *
 * @param v the view that was clicked
 */
public void onButtonClick_1(View v) {
    if (v.getId() != R.id.bVoice) {
        return;
    }
    String typedPhrase = a.getText().toString();
    if (typedPhrase.toLowerCase().contains("command")) {
        Intent userintent = new Intent(PocketSphinxActivity.this, Gvoice.class);
        startActivity(userintent);
    } else {
        Toast.makeText(getApplicationContext(), "Incorrect Information", Toast.LENGTH_SHORT).show();
    }
}
我发现另一种方法是将以下内容添加到我的清单文件中 android:noHistory="true"
这似乎清除了主 activity 返回栈的历史记录,现在 onClickListener 在第二个 activity 中可以正常工作了。
另一种方法是将以下内容放在我的 MainActivity 文件的底部,这是 @brandall 建议的 onPause
方法:
@Override
protected void onPause() {
super.onPause();
// Finishing here destroys the activity whenever it leaves the
// foreground, which releases the microphone for the next activity.
finish();
}
我在评论中说:
使用noHistory
不是解决方案,它只是恰好在大多数情况下有效。当您的应用程序由于收到来电等原因不再处于前台时,您不希望失去用户体验,您希望它 暂停 。正确管理 onPause 和 onResume 是一个最基本的要求,而使用 noHistory
只是暂时回避了这一点。
下面是一个示例,说明当您的应用程序暂停并随后恢复时如何管理 recognizer
对象。
@Override
protected void onResume() {
super.onResume();
// Recreate the recognizer released in onPause() so recognition can
// continue once the activity returns to the foreground.
if (recognizer == null) {
// Set up recognizer again
}
}
@Override
protected void onPause() {
super.onPause();
// Release the recognizer (and with it the microphone) whenever the
// activity leaves the foreground; onResume() rebuilds it on return.
if (recognizer != null) {
recognizer.cancel();
recognizer.shutdown();
recognizer = null;
}
}
每次 暂停 时销毁您的应用程序虽然可以释放麦克风和识别器资源,但这并不是您想要做的 - 您想要优雅地管理您的应用程序进入后台并再次进入前台的生命周期,同时确保您持有的资源(例如麦克风)不会导致其他应用程序出现问题。
我正在创建一个简单的应用程序。我目前发现的是,当我打开我的应用程序时,已在 .gradle
文件夹中设置了以下权限:
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />
<uses-permission android:name="android.permission.INTERNET" />
<uses-permission android:name="android.permission.RECORD_AUDIO" />
但是我的应用程序有第二个 activity 称为 Gvoice.java,然后依赖于谷歌语音到文本。
我发现,如果我在 .gradle 文件夹中保留上述权限,我的主 activity window 与 pocketsphinx 完美配合。但是我的 Gvoice activity 给了我一条消息 'Can't open microphone'
.
但是我发现如果我去掉.gradle文件夹中的三个权限。我的主要 activity 不再执行我想要它执行的操作,但我的第二个 activity 称为 Gvoice 现在可以正确地将语音转换为文本并且不会给我 'Can't open microphone'
消息。
是否有解决此问题的简单方法,因为我认为它肯定与麦克风权限有关。另请注意,每当我 return 到主 activity 时,需要设置权限,当我在主 activity 之外的另一个 activity 时,它需要禁用许可(或者如果有另一种方法可以做到这一点)。
能否在manifest文件中为不同的activity设置不同的权限?
下面我已经更新了目前的内容,请看一下:
public class PocketSphinxActivity extends Activity implements RecognitionListener
{
private static final String KWS_SEARCH = "wakeup";
/* Keyword we are looking for to activate menu */
private static final String KEYPHRASE = "open voice command"; //adjust this keyphrase here and in string!
private SpeechRecognizer recognizer;
private HashMap<String, Integer> captions;
ListView lv;
TextView tv;
EditText a;
Button b;
Button c;
Boolean isDone = false;
/**
 * Builds the UI, wires up the views, and kicks off recognizer setup on a
 * background thread (asset syncing involves IO and is too slow for the UI
 * thread). Also installs a TextWatcher that auto-clicks the voice button
 * once the keyphrase has been dictated into the username field.
 */
@Override
public void onCreate(Bundle state) {
super.onCreate(state);
// Prepare the data for UI
captions = new HashMap<String, Integer>();
captions.put(KWS_SEARCH, R.string.kws_caption);
setContentView(R.layout.main);
((TextView) findViewById(R.id.caption_text))
.setText("Preparing the recognizer");
lv = (ListView) findViewById(R.id.lvVoiceReturn);
tv = (TextView) findViewById(R.id.result_text);
a = (EditText) findViewById(R.id.TFusername);
b = (Button) findViewById(R.id.bVoice);
c = (Button)findViewById(R.id.Blogin);
// Recognizer initialization is a time-consuming and it involves IO,
// so we execute it in async task
new AsyncTask<Void, Void, Exception>() {
@Override
protected Exception doInBackground(Void... params) {
try {
Assets assets = new Assets(PocketSphinxActivity.this);
File assetDir = assets.syncAssets();
setupRecognizer(assetDir);
} catch (IOException e) {
// Report the failure to onPostExecute instead of crashing here.
return e;
}
return null;
}
@Override
protected void onPostExecute(Exception result) {
if (result != null) {
((TextView) findViewById(R.id.caption_text))
.setText("Failed to init recognizer " + result);
} else {
// Setup succeeded: start passive keyword spotting.
switchSearch(KWS_SEARCH);
}
}
}.execute();
//line added.../////////////////////////
// Watch the username field: once the dictated text equals the keyphrase,
// simulate a click on the voice button after a short delay.
a.addTextChangedListener(new TextWatcher() {
@Override
public void beforeTextChanged(CharSequence s, int start, int count, int after) {
}
@Override
public void onTextChanged(CharSequence s, int start, int before, int count) {
if (s.toString().trim().equalsIgnoreCase("open voice command")) {
//
//Do your stuff here OR button.performClick()
//
//DELAY
Handler handler = new Handler();
handler.postDelayed(new Runnable() {
@Override
public void run() {
// isDone guards against firing the click more than once.
if (!isDone) {
b.performClick();
isDone = true;
}
}
}, 500);
}
}
@Override
public void afterTextChanged(Editable s) {
}
});
////////////////////////////////////////
}
/**
 * Tears down the recognizer when the activity is destroyed so the
 * microphone and native resources are released.
 */
@Override
public void onDestroy() {
    super.onDestroy();
    // The recognizer is created asynchronously in onCreate(); it is still
    // null if setup failed or has not finished yet. Guard against an NPE
    // (same pattern as the onPause() example recommended below).
    if (recognizer != null) {
        recognizer.cancel();
        recognizer.shutdown();
        recognizer = null;
    }
}
/**
 * Receives quick updates about the current hypothesis while recognition
 * is in progress and mirrors the partial text into the username field.
 * In keyword-spotting mode we can react here; other modes must wait for
 * the final result in onResult.
 */
@Override
public void onPartialResult(Hypothesis hypothesis) {
    if (hypothesis != null) {
        final String partialText = hypothesis.getHypstr();
        EditText usernameField = (EditText) findViewById(R.id.TFusername);
        usernameField.setText(partialText);
    }
}
/**
 * Called when the recognizer is stopped. Clears the username field and,
 * if a final hypothesis exists, shows it briefly in a toast.
 */
@Override
public void onResult(Hypothesis hypothesis) {
    EditText usernameField = (EditText) findViewById(R.id.TFusername);
    usernameField.setText("");
    if (hypothesis == null) {
        return;
    }
    String finalText = hypothesis.getHypstr();
    makeText(getApplicationContext(), finalText, Toast.LENGTH_SHORT).show();
}
@Override
public void onBeginningOfSpeech() {
// No-op: nothing needs to happen when speech input starts.
}
/**
 * Stops the recognizer here to obtain a final result: if a non-keyword
 * search just finished, switch back to passive keyword spotting.
 */
@Override
public void onEndOfSpeech() {
    String activeSearch = recognizer.getSearchName();
    if (!KWS_SEARCH.equals(activeSearch)) {
        switchSearch(KWS_SEARCH);
    }
}
/**
 * Restarts recognition with the given search and updates the caption.
 * Keyword spotting runs without a timeout; any other search stops
 * automatically after 10 seconds (10000 ms).
 *
 * @param searchName the recognizer search to activate
 */
private void switchSearch(String searchName) {
    recognizer.stop();
    boolean keywordSpotting = searchName.equals(KWS_SEARCH);
    if (keywordSpotting) {
        recognizer.startListening(searchName);
    } else {
        recognizer.startListening(searchName, 10000);
    }
    String caption = getResources().getString(captions.get(searchName));
    TextView captionView = (TextView) findViewById(R.id.caption_text);
    captionView.setText(caption);
}
/**
 * Builds the PocketSphinx recognizer from the synced asset directory and
 * registers this activity as its listener.
 *
 * @param assetsDir directory containing the acoustic model and dictionary
 * @throws IOException if the recognizer resources cannot be read
 */
private void setupRecognizer(File assetsDir) throws IOException {
// The recognizer can be configured to perform multiple searches
// of different kind and switch between them
recognizer = defaultSetup()
.setAcousticModel(new File(assetsDir, "en-us-ptm"))
.setDictionary(new File(assetsDir, "cmudict-en-us.dict"))
// To disable logging of raw audio comment out this call (takes a lot of space on the device)
.setRawLogDir(assetsDir)
// Threshold to tune for keyphrase to balance between false alarms and misses
.setKeywordThreshold(1e-45f)
// Use context-independent phonetic search, context-dependent is too slow for mobile
.setBoolean("-allphone_ci", true)
.getRecognizer();
recognizer.addListener(this);
/** In your application you might not need to add all those searches.
* They are added here for demonstration. You can leave just one.
*/
// Create keyword-activation search.
recognizer.addKeyphraseSearch(KWS_SEARCH, KEYPHRASE);
}
/** Surfaces recognizer errors to the user via the caption text view. */
@Override
public void onError(Exception error) {
    TextView captionView = (TextView) findViewById(R.id.caption_text);
    captionView.setText(error.getMessage());
}
@Override
public void onTimeout() {
// Listening timed out; fall back to passive keyword spotting.
switchSearch(KWS_SEARCH);
}
/**
 * Click handler for the voice button: opens the Gvoice activity when the
 * text typed into the EditText contains "command" (case-insensitive);
 * otherwise shows an error toast.
 *
 * @param v the view that was clicked
 */
public void onButtonClick_1(View v) {
    if (v.getId() != R.id.bVoice) {
        return;
    }
    String typedPhrase = a.getText().toString();
    if (typedPhrase.toLowerCase().contains("command")) {
        Intent userintent = new Intent(PocketSphinxActivity.this, Gvoice.class);
        startActivity(userintent);
    } else {
        Toast.makeText(getApplicationContext(), "Incorrect Information", Toast.LENGTH_SHORT).show();
    }
}
我发现另一种方法是将以下内容添加到我的清单文件中 android:noHistory="true"
这似乎清除了主 activity 返回栈的历史记录,现在 onClickListener 在第二个 activity 中可以正常工作了。
另一种方法是将以下内容放在我的 MainActivity 文件的底部,这是 @brandall 建议的 onPause
方法:
@Override
protected void onPause() {
super.onPause();
// Finishing here destroys the activity whenever it leaves the
// foreground, which releases the microphone for the next activity.
finish();
}
我在评论中说:
使用noHistory
不是解决方案,它只是恰好在大多数情况下有效。当您的应用程序由于收到来电等原因不再处于前台时,您不希望失去用户体验,您希望它 暂停 。正确管理 onPause 和 onResume 是一个最基本的要求,而使用 noHistory
只是暂时回避了这一点。
下面是一个示例,说明当您的应用程序暂停并随后恢复时如何管理 recognizer
对象。
@Override
protected void onResume() {
super.onResume();
// Recreate the recognizer released in onPause() so recognition can
// continue once the activity returns to the foreground.
if (recognizer == null) {
// Set up recognizer again
}
}
@Override
protected void onPause() {
super.onPause();
// Release the recognizer (and with it the microphone) whenever the
// activity leaves the foreground; onResume() rebuilds it on return.
if (recognizer != null) {
recognizer.cancel();
recognizer.shutdown();
recognizer = null;
}
}
每次 暂停 时销毁您的应用程序虽然可以释放麦克风和识别器资源,但这并不是您想要做的 - 您想要优雅地管理您的应用程序进入后台并再次进入前台的生命周期,同时确保您持有的资源(例如麦克风)不会导致其他应用程序出现问题。