JSON not working, is my code wrong or is the JSON wrong?
I loaded the tensorflow.js toxicity model and am trying to work with the JSON values it provides, but I can't isolate any of the labels. The closest I've gotten is the code below. It just dumps the entire string onto my screen, and no matter what I try I can't seem to access it like a JSON object; right now it just produces one giant string.
// Ask the model to classify inputs
model.classify(sentences).then((predictions) => {
  // Semi-pretty-print results to the console
  console.log(JSON.stringify(predictions, null, 2));
  // Try to print more relevant results on screen; still messy, but it gets the job done
  let betterPredictions = JSON.stringify(predictions);
  // Grab the body element so everything can be attached to it
  var body = document.getElementsByTagName("body")[0];
  // Add a containing div
  var div = document.createElement("div");
  // Add the text to the div inside a paragraph
  var newHead = document.createElement("p");
  var newHeadText = document.createTextNode(betterPredictions);
  newHead.appendChild(newHeadText);
  div.appendChild(newHead); // appendChild takes a single node
  body.appendChild(div);    // attach the div so it actually shows up
});
The console output (and what ends up on screen) is this one giant string:
[
{
"label": "identity_attack",
"results": [
{
"probabilities": {
"0": 0.9968510270118713,
"1": 0.0031489399261772633
},
"match": false
},
{
"probabilities": {
"0": 0.9862365126609802,
"1": 0.013763549737632275
},
"match": false
},
{
"probabilities": {
"0": 0.9998451471328735,
"1": 0.00015478680143132806
},
"match": false
}
]
},
{
"label": "insult",
"results": [
{
"probabilities": {
"0": 0.02373320795595646,
"1": 0.9762668609619141
},
"match": true
},
{
"probabilities": {
"0": 0.24276699125766754,
"1": 0.7572329640388489
},
"match": true
},
{
"probabilities": {
"0": 0.997154951095581,
"1": 0.002844985108822584
},
"match": false
}
]
},
{
"label": "obscene",
"results": [
{
"probabilities": {
"0": 0.997951090335846,
"1": 0.0020489569287747145
},
"match": false
},
{
"probabilities": {
"0": 0.16121098399162292,
"1": 0.8387889862060547
},
"match": true
},
{
"probabilities": {
"0": 0.999924898147583,
"1": 0.00007506388647016138
},
"match": false
}
]
},
{
"label": "severe_toxicity",
"results": [
{
"probabilities": {
"0": 0.9999984502792358,
"1": 0.0000015903623307167436
},
"match": false
},
{
"probabilities": {
"0": 0.9985486268997192,
"1": 0.0014513932401314378
},
"match": false
},
{
"probabilities": {
"0": 0.9999998807907104,
"1": 1.036240959706447e-7
},
"match": false
}
]
},
{
"label": "sexual_explicit",
"results": [
{
"probabilities": {
"0": 0.9995192289352417,
"1": 0.0004807313671335578
},
"match": false
},
{
"probabilities": {
"0": 0.7500032782554626,
"1": 0.24999678134918213
},
"match": false
},
{
"probabilities": {
"0": 0.9998985528945923,
"1": 0.00010142526298295707
},
"match": false
}
]
},
{
"label": "threat",
"results": [
{
"probabilities": {
"0": 0.9987996816635132,
"1": 0.0012002806179225445
},
"match": false
},
{
"probabilities": {
"0": 0.9483732581138611,
"1": 0.051626741886138916
},
"match": false
},
{
"probabilities": {
"0": 0.9990767240524292,
"1": 0.0009232169832102954
},
"match": false
}
]
},
{
"label": "toxicity",
"results": [
{
"probabilities": {
"0": 0.020521018654108047,
"1": 0.9794790148735046
},
"match": true
},
{
"probabilities": {
"0": 0.05149741843342781,
"1": 0.9485026001930237
},
"match": true
},
{
"probabilities": {
"0": 0.9911190867424011,
"1": 0.008880933746695518
},
"match": false
}
]
}
]
That's because you're using JSON.stringify(predictions, null, 2), which converts the entire response into a string. Note that predictions is already a JavaScript array, so you can index into it directly; JSON.parse() is only needed to turn a JSON string back into an object. I've attached an example of parsing such a string below.
const apiResponse = `[
{
"label": "identity_attack",
"results": [
{
"probabilities": {
"0": 0.9968510270118713,
"1": 0.0031489399261772633
},
"match": false
},
{
"probabilities": {
"0": 0.9862365126609802,
"1": 0.013763549737632275
},
"match": false
},
{
"probabilities": {
"0": 0.9998451471328735,
"1": 0.00015478680143132806
},
"match": false
}
]
},
{
"label": "insult",
"results": [
{
"probabilities": {
"0": 0.02373320795595646,
"1": 0.9762668609619141
},
"match": true
},
{
"probabilities": {
"0": 0.24276699125766754,
"1": 0.7572329640388489
},
"match": true
},
{
"probabilities": {
"0": 0.997154951095581,
"1": 0.002844985108822584
},
"match": false
}
]
},
{
"label": "obscene",
"results": [
{
"probabilities": {
"0": 0.997951090335846,
"1": 0.0020489569287747145
},
"match": false
},
{
"probabilities": {
"0": 0.16121098399162292,
"1": 0.8387889862060547
},
"match": true
},
{
"probabilities": {
"0": 0.999924898147583,
"1": 0.00007506388647016138
},
"match": false
}
]
},
{
"label": "severe_toxicity",
"results": [
{
"probabilities": {
"0": 0.9999984502792358,
"1": 0.0000015903623307167436
},
"match": false
},
{
"probabilities": {
"0": 0.9985486268997192,
"1": 0.0014513932401314378
},
"match": false
},
{
"probabilities": {
"0": 0.9999998807907104,
"1": 1.036240959706447e-7
},
"match": false
}
]
},
{
"label": "sexual_explicit",
"results": [
{
"probabilities": {
"0": 0.9995192289352417,
"1": 0.0004807313671335578
},
"match": false
},
{
"probabilities": {
"0": 0.7500032782554626,
"1": 0.24999678134918213
},
"match": false
},
{
"probabilities": {
"0": 0.9998985528945923,
"1": 0.00010142526298295707
},
"match": false
}
]
},
{
"label": "threat",
"results": [
{
"probabilities": {
"0": 0.9987996816635132,
"1": 0.0012002806179225445
},
"match": false
},
{
"probabilities": {
"0": 0.9483732581138611,
"1": 0.051626741886138916
},
"match": false
},
{
"probabilities": {
"0": 0.9990767240524292,
"1": 0.0009232169832102954
},
"match": false
}
]
},
{
"label": "toxicity",
"results": [
{
"probabilities": {
"0": 0.020521018654108047,
"1": 0.9794790148735046
},
"match": true
},
{
"probabilities": {
"0": 0.05149741843342781,
"1": 0.9485026001930237
},
"match": true
},
{
"probabilities": {
"0": 0.9911190867424011,
"1": 0.008880933746695518
},
"match": false
}
]
}
]`;
const data = JSON.parse(apiResponse); // turn the JSON string back into a real array of objects
console.log('data', data); // the full array
console.log('first index', data[0]); // the first label object
console.log('label', data[0]['label']); // "identity_attack"
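And if you keep the predictions value returned by model.classify() instead of stringifying it, you can skip JSON.parse entirely. Here is a minimal sketch of reading the labels and probabilities straight off the array and rendering them as a list; model and sentences are assumed to be the same ones already set up in the question.

// A minimal sketch, assuming `model` and `sentences` are the toxicity model
// and input strings from the question. `predictions` is a plain JavaScript
// array, so no JSON.parse or JSON.stringify is needed.
model.classify(sentences).then((predictions) => {
  const body = document.getElementsByTagName("body")[0];
  const list = document.createElement("ul");
  predictions.forEach((prediction) => {
    // prediction.label is e.g. "insult"; prediction.results holds one
    // entry per input sentence, with probabilities and a match flag.
    prediction.results.forEach((result, i) => {
      const item = document.createElement("li");
      item.textContent =
        prediction.label +
        " (sentence " + i + "): match=" + result.match +
        ", p=" + result.probabilities["1"].toFixed(4);
      list.appendChild(item);
    });
  });
  body.appendChild(list);
});

Each entry in results corresponds to one input sentence, in the same order as sentences, so you can line the scores back up with the original text.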