I can't trigger my second action on Google Assistant
I've been trying to use the Actions SDK, and it seems to work, but only for my main intent. I added a second intent, but it never triggers.
Here is my action.json:
{
  "actions": [
    {
      "description": "Default Welcome Intent",
      "name": "MAIN",
      "fulfillment": {
        "conversationName": "conversation_1"
      },
      "intent": {
        "name": "actions.intent.MAIN"
      }
    },
    {
      "name": "add",
      "intent": {
        "name": "myintent.ADD",
        "parameters": [
          {
            "name": "somenumber",
            "type": "SchemaOrg_Number"
          }
        ],
        "trigger": {
          "queryPatterns": [
            "add $SchemaOrg_Number:somenumber",
            "add"
          ]
        }
      },
      "fulfillment": {
        "conversationName": "add"
      }
    }
  ],
  "conversations": {
    "conversation_1": {
      "name": "conversation_1",
      "url": "https://myaddress/sayNumber",
      "fulfillmentApiVersion": 2
    },
    "add": {
      "name": "add",
      "url": "https://myaddress/sayNumber",
      "fulfillmentApiVersion": 2
    }
  }
}
Here is my index.js:
'use strict';

process.env.DEBUG = 'actions-on-google:*';

const ActionsSdkApp = require('actions-on-google').ActionsSdkApp;
const functions = require('firebase-functions');

const NO_INPUTS = [
  'I didn\'t hear that.',
  'If you\'re still there, say that again.',
  'We can stop here. See you soon.'
];

exports.sayNumber = functions.https.onRequest((request, response) => {
  const app = new ActionsSdkApp({request, response});

  function mainIntent (app) {
    console.log('mainIntent');
    let inputPrompt = app.buildInputPrompt(true, '<speak>Hi! <break time="1"/> ' +
      'I can read out an ordinal like ' +
      '<say-as interpret-as="ordinal">123</say-as>. Say a number.</speak>', NO_INPUTS);
    app.ask(inputPrompt);
  }

  function addIntent (app) {
    console.log('addIntent');
    let inputPrompt = app.buildInputPrompt(true, '<speak>Hi! <break time="1"/> ' +
      'I can add.</speak>', NO_INPUTS);
    app.ask(inputPrompt);
  }

  function rawInput (app) {
    console.log('rawInput');
    if (app.getRawInput() === 'bye') {
      app.tell('Goodbye!');
    } else {
      let inputPrompt = app.buildInputPrompt(true, '<speak>You said, <say-as interpret-as="ordinal">' +
        app.getRawInput() + '</say-as> ' + app.getIntent() + '</speak>', NO_INPUTS);
      app.ask(inputPrompt);
    }
  }

  let actionMap = new Map();
  actionMap.set(app.StandardIntents.MAIN, mainIntent);
  actionMap.set(app.StandardIntents.TEXT, rawInput);
  actionMap.set('myintent.ADD', addIntent);

  app.handleRequest(actionMap);
});
I can say "talk to my action name", and then everything I say gets handled as raw input, even when I use the "add" keyword. What am I doing wrong?
That's correct. The actions.json package only defines how users start a conversation with your Action. Once the conversation has started, you are passed TEXT (or OPTION) intents, and you are expected to handle the natural language processing yourself. Additional intents can be used for speech biasing, but not for parsing responses.
This is different from how some other voice agents handle language parsing. The Actions SDK is primarily meant for cases where you already have your own NLP.
If you don't, you're probably better off using Dialogflow or Converse.AI.
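In other words, all routing after the welcome has to live in your TEXT handler. A minimal sketch of doing that keyword matching yourself (assuming the same actions-on-google v1 ActionsSdkApp API used in the question; the regex and prompts are illustrative, not SDK features) could replace the rawInput function above:

function rawInput (app) {
  // Every post-welcome utterance arrives here as a TEXT intent,
  // so keyword routing has to happen in your own code.
  const input = app.getRawInput().toLowerCase();
  if (input === 'bye') {
    app.tell('Goodbye!');
  } else if (input.startsWith('add')) {
    // Hand-rolled parsing (illustrative): pull out the first number, if any.
    const match = input.match(/-?\d+(\.\d+)?/);
    if (match) {
      app.ask('<speak>Adding <say-as interpret-as="cardinal">' +
        match[0] + '</say-as>.</speak>');
    } else {
      app.ask('<speak>Add what? Say a number.</speak>');
    }
  } else {
    app.ask('<speak>Say "add" followed by a number, or "bye" to quit.</speak>');
  }
}

With this in place, "add 5" never needs to match a queryPattern at all; the pattern in actions.json only helps users invoke the Action directly (e.g. "Ask my action name to add 5") and biases speech recognition.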