-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathsmart_spearker.js
138 lines (105 loc) · 3.16 KB
/
smart_spearker.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
const Recoader = require("./recorder").Recoader;
const config = require("./config");
const fs = require("fs");
const readline = require("readline");
const simplayer = require("simplayer");
const Gpio = require("onoff").Gpio;
const SpeechToTextV1 = require("ibm-watson/speech-to-text/v1");
const { IamAuthenticator } = require("ibm-watson/auth");
const TextToSpeechV1 = require("ibm-watson/text-to-speech/v1");
// Watson API clients, authenticated with IAM credentials loaded from ./config.
// NOTE(review): assumes config.watson.speechToText / .textToSpeech each hold an
// { apikey: ... } object accepted by IamAuthenticator — confirm against config.js.
const speechToText = new SpeechToTextV1({
  authenticator: new IamAuthenticator(config.watson.speechToText)
});
const textToSpeech = new TextToSpeechV1({
  authenticator: new IamAuthenticator(config.watson.textToSpeech)
});
/**
 * Voice-controlled smart speaker: a GPIO button press records a few seconds
 * of audio, IBM Watson Speech to Text transcribes it, the transcript is
 * matched against keyword rules, and replies are spoken via Watson Text to
 * Speech.
 *
 * NOTE(review): the "Spearker"/"Recoader" misspellings are preserved because
 * other modules import these names.
 */
class SmartSpearker {
  /**
   * @param {Array<{words: string[], callback: Function}>} rules - keyword
   *   rules; the first rule with a word contained in the transcript wins.
   */
  constructor(rules) {
    this.rules = rules;
    this.recorder = new Recoader();
  }

  /** Release the GPIO pin so the process can exit cleanly. */
  closeEvent() {
    this.button.unexport();
  }

  /**
   * Watch the push button on GPIO 14; on a press, record for 5 seconds and
   * then invoke `callback(this, config.tmpFile, args)`.
   *
   * @param {Function} callback - receives (speaker, recordedFilePath, args).
   * @param {*} args - forwarded untouched to `callback`.
   */
  recordSaying(callback, args) {
    this.button = new Gpio(14, "in", "both");
    // Arrow function keeps `this`, so the old `self` alias is unnecessary.
    this.button.watch((err, value) => {
      if (err) {
        // FIX: the GPIO error argument was silently ignored before.
        console.log("GPIO error:", err);
        return;
      }
      if (value) {
        return; // react only to one edge of the "both" watch
      }
      this.recorder.startRecord();
      // gpioで置き換え (TODO: stop on button release instead of a fixed timer)
      setTimeout(() => {
        this.recorder.stopRecord();
        callback(this, config.tmpFile, args);
      }, 5000);
    });
  }

  /**
   * Send the recorded WAV file to Watson Speech to Text and pass the
   * whitespace-stripped transcript to `callback(self, transcript)`.
   *
   * FIX: uses the SDK's promise API — the code already read the
   * promise-shaped `transcript.result.results`, but called `recognize` in
   * the Node-callback style that ibm-watson v5 removed.
   */
  async recognizeSaying(self, _, callback) {
    console.log("speech apiに送信");
    const watsonParam = {
      model: "ja-JP_BroadbandModel",
      audio: fs.createReadStream(config.tmpFile),
      contentType: "audio/wav"
    };
    try {
      const transcript = await speechToText.recognize(watsonParam);
      const results = transcript.result.results;
      // FIX: silence yields an empty results array; guard before indexing.
      if (!results || results.length === 0) {
        console.log("Error:", "no speech recognized");
        return;
      }
      const unfilteredMsg = results[0].alternatives[0].transcript;
      const filteredMsg = unfilteredMsg.replace(/\s+/g, "");
      console.log(filteredMsg);
      callback(self, filteredMsg);
    } catch (error) {
      console.log("Error:", error);
    }
  }

  /**
   * Dispatch a transcript: run the callback of the first rule whose keyword
   * appears in `sentence`; otherwise announce that no command matched.
   */
  judge(self, sentence) {
    for (const rule of self.rules) {
      // `includes` + strict semantics instead of `indexOf(...) != -1`.
      if (rule.words.some((word) => sentence.includes(word))) {
        rule.callback(self);
        return;
      }
    }
    self.say("コマンドが見つかりませんでした");
  }

  /**
   * Synthesize `msg` with Watson Text to Speech, write it to ./tmp.wav and
   * play it; `callback(this)` fires when playback ends.
   *
   * FIX: errors from the Watson calls are now caught (previously an
   * unhandled promise rejection), and the log message names the file that
   * is actually written (it said "audio.wav" while writing tmp.wav).
   */
  async say(msg, callback = () => {}) {
    console.log("to speech apiに送信");
    const watsonParam = {
      text: msg,
      voice: "ja-JP_EmiVoice",
      accept: "audio/wav"
    };
    try {
      const response = await textToSpeech.synthesize(watsonParam);
      // Watson streams the WAV without a valid length field; repair the
      // header so audio players accept the file.
      const repairedFile = await textToSpeech.repairWavHeaderStream(
        response.result
      );
      fs.writeFileSync("tmp.wav", repairedFile);
      console.log("tmp.wav written with a corrected wav header");
      simplayer("./tmp.wav", (error) => {
        if (error) console.log(error);
        console.log("sound stop");
        callback(this);
      });
    } catch (error) {
      console.log("Error:", error);
    }
  }

  /**
   * Write already-synthesized audio `data` to ./tmp.wav and play it.
   *
   * FIX: `fs.writeFileSync` is synchronous and takes no callback — the old
   * code passed an error handler as the options argument, which is invalid
   * and never ran. Errors are caught with try/catch instead.
   */
  playSound(data, callback) {
    try {
      fs.writeFileSync("tmp.wav", data);
    } catch (error) {
      console.log(error);
      return;
    }
    console.log("sound start");
    simplayer("./tmp.wav", (error) => {
      if (error) console.log(error);
      console.log("sound stop");
      callback(this);
    });
  }

  /** Start the record → recognize → judge pipeline. */
  run() {
    this.recordSaying(this.recognizeSaying, this.judge);
  }

  /** Re-arm the pipeline (e.g. after a command finishes). */
  restart() {
    this.run();
  }
}
module.exports.SmartSpearker = SmartSpearker;