extends Node

## Emitted when the recorded speech is judged acceptable
## (see _on_timer_timeout, which emits it after a long-enough take).
signal speech_recognition_successed

## Recording effect on the "Record" audio bus (effect slot 0); cached in _ready().
var effect: AudioEffectRecord

## Capture effect on the "Record" bus (effect slot 1), used to measure input volume.
var capture: AudioEffectCapture
func _ready():
	# Locate the "Record" bus and cache its two effects:
	# slot 0 holds the AudioEffectRecord, slot 1 the AudioEffectCapture.
	var record_bus := AudioServer.get_bus_index("Record")
	effect = AudioServer.get_bus_effect(record_bus, 0)
	capture = AudioServer.get_bus_effect(record_bus, 1)
## Start recording; does nothing if recording is already active.
func startRecord():
	print("启动录音")
	if effect.is_recording_active():
		return
	effect.set_recording_active(true)
## Stop recording; does nothing if recording is not active.
func stopRecord():
	if not effect.is_recording_active():
		return
	effect.set_recording_active(false)
## Preloaded training-speech reply clips.
## NOTE(review): neither variable is referenced in this file's visible code —
## presumably passed to play_reply() by external callers; confirm before removing.
var audio_sd = preload("res://Assets/training_speech/sd.mp3")
var audio_zlgzmhfzc = preload("res://Assets/training_speech/zlgzmhfzc.mp3")
## Play a reply audio stream and wait until playback finishes.
## Coroutine: callers may `await` it.
## Recording is stopped first so the played audio is not captured back.
## [param reply] expected to be an AudioStream (MP3/Ogg/WAV); null is ignored.
func play_reply(reply):
	if reply == null:
		return
	stopRecord()
	assert(reply is AudioStream, "reply不是音频资源")
	if not (reply is AudioStream):
		# assert() is compiled out in release builds; fail safely there too
		# instead of assigning a non-stream to the player.
		push_error("play_reply: reply is not an AudioStream")
		return
	# Make sure the clip does not loop, otherwise `finished` never fires
	# and the await below would hang forever.
	if reply is AudioStreamMP3 or reply is AudioStreamOggVorbis:
		reply.loop = false
	elif reply is AudioStreamWAV:
		reply.loop_mode = AudioStreamWAV.LOOP_DISABLED
	$AudioStreamPlayer.stream = reply
	$AudioStreamPlayer.play()
	await $AudioStreamPlayer.finished
## Record speech and wait until recognition reports success
## (i.e. until speech_recognition_successed is emitted by _on_timer_timeout).
## Coroutine: external callers must await it if they care about completion.
## NOTE(review): `keywords` is not used anywhere in this file — possibly
## intended for a future recognition step; confirm before removing.
func speech_record_check(keywords):
	startRecord()
	# $Timer drives the periodic volume checks in _on_timer_timeout.
	$Timer.start()
	await speech_recognition_successed
	$Timer.stop()
	stopRecord()
## Minimum per-sample amplitude treated as "voice present" (per channel).
const VolumeMin = 0.05
## Silence duration threshold, in seconds ("long time with no speech").
const LongTimeNoVoice = 1.5
## Accumulated silence time in seconds; advanced by _on_timer_timeout.
var novoiceTime = 0
## Periodic check driven by $Timer while recording is active.
## Measures captured input volume; on a long silence it plays the captured
## take back and emits speech_recognition_successed when the take is long
## enough, otherwise restarts recording.
func _on_timer_timeout():
	if effect.is_recording_active():
		print(capture.get_buffer_length_frames(), ", ", capture.get_discarded_frames(), ", ", capture.get_frames_available())
		var buf = capture.get_buffer(capture.get_frames_available())
		var hasvoice = false
		# Scan captured stereo frames; any sample above VolumeMin counts as voice.
		for vec in buf:
			if vec.x > VolumeMin or vec.y > VolumeMin:
				#print("Left channel volume = ", vec.x, ", Right volume = ", vec.y)
				hasvoice = true
		if hasvoice:
			if novoiceTime > LongTimeNoVoice:
				novoiceTime = 0
				# Voice returned after a long pause: restart the recording fresh.
				stopRecord()
				startRecord()
			# NOTE(review): when voice is present but the pause was short,
			# novoiceTime is NOT reset, so silence accumulates across separate
			# short pauses — confirm whether that is intended.
		else:
			novoiceTime += $Timer.wait_time
			if novoiceTime >= LongTimeNoVoice:
				print("长时间没有说话: ", novoiceTime)
				var rcd = effect.get_recording()
				stopRecord()
				# Plays the user's own recording back as the "reply".
				await play_reply(rcd)
				if rcd.get_length() > 5:
					# A take longer than 5 s counts as a successful utterance.
					speech_recognition_successed.emit()
				else:
					startRecord()
				# NOTE(review): novoiceTime is not reset in this branch, so the
				# >= LongTimeNoVoice condition re-fires on every subsequent
				# timeout — looks like a bug; verify and reset to 0 if so.

	# Commented-out debugging path kept from the original source.
	#if effect.is_recording_active():
		#recording = effect.get_recording()
		#print(recording.data.size(), ", ", recording.format, ", ", recording.loop_mode, ", ", recording.get_length())
		#stopRecord()
		#var player = AudioStreamPlayer.new()
		#add_child(player)
		#player.stream = recording
		#player.play()
		#player.finished.connect( _on_audio_play_finished.bind(player))
		#startRecord()
## Cleanup callback for a dynamically created AudioStreamPlayer:
## log completion, stop the player, and schedule it for deletion.
func _on_audio_play_finished(player):
	print(player, "播放完成")
	player.stop()
	player.queue_free()