参考链接:/qazwyc/article/details/57153734
本文所有代码链接:/s/1LT5LBkOOGrzMyg6GADf-Og
开头没有加入强制编码三句:
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
导致我吃了不少苦,用python2.7运行会出现各种错误,所得到结果需要编码转换,虽然实现了,可是感觉不舒服,因没法显示汉字啊,加了上面三句可以的,我树莓派是装过汉字系统的。
用python3.5打开时如上图,如果点了OK,那就麻烦了,文件就废掉了!所以如果编辑,要用记事本打开。
树莓派中文件位置如下:
运行过程如下:
一. 没有加以上三句编码 完成的任务代码如下(无法显示汉字,请用记事本打开编辑):
#!/usr/bin/env python
import wave
import requests
import time
import base64
from pyaudio import PyAudio, paInt16
import webbrowser
import os
import signal
import RPi.GPIO as GPIO
# BOARD numbering scheme: pins are addressed by their physical header position.
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11, GPIO.OUT)
GPIO.setup(12, GPIO.OUT)
GPIO.setup(13, GPIO.OUT)
# Start with all three LEDs off.
GPIO.output(11, GPIO.LOW)
GPIO.output(12, GPIO.LOW)
GPIO.output(13, GPIO.LOW)

framerate = 16000   # sampling rate (Hz)
num_samples = 2000  # samples read per buffer
sampwidth = 2       # sample width: 2 bytes (16-bit PCM)
channels = 1        # mono
FILEPATH = 'speech.wav'

# Baidu OAuth token endpoint -- NOTE(review): the host prefix was stripped when
# this article was published; restore the full URL before running.
base_url = "/oauth/2.0/token?grant_type=client_credentials&client_id=%s&client_secret=%s"
APIKey = "bn58jNgdjRED0uKCScW9R2C6"
SecretKey = "LgLYImS2Hcxqno6QQzIMrH7Gf8uRRbZb"
HOST = base_url % (APIKey, SecretKey)
def getToken(host):
    """POST to the Baidu OAuth endpoint and return the access token string."""
    res = requests.post(host)
    return res.json()['access_token']
def save_wave_file(filepath, data):
    """Write recorded PCM frames to *filepath* as a WAV file.

    Uses the module-level audio settings (channels, sampwidth, framerate).
    *data* is a list of byte chunks as returned by the PyAudio stream.
    """
    wf = wave.open(filepath, 'wb')
    wf.setnchannels(channels)
    wf.setsampwidth(sampwidth)
    wf.setframerate(framerate)
    wf.writeframes(b''.join(data))
    wf.close()
def my_record():
    """Record ~4 seconds of microphone audio and save it to FILEPATH."""
    pa = PyAudio()
    stream = pa.open(format=paInt16, channels=channels,
                     rate=framerate, input=True, frames_per_buffer=num_samples)
    my_buf = []
    t = time.time()
    print('record...')
    while time.time() < t + 4:  # record for 4 seconds
        string_audio_data = stream.read(num_samples)
        my_buf.append(string_audio_data)
    print('record over.')
    save_wave_file(FILEPATH, my_buf)
    stream.close()
def get_audio(file):
    """Return the raw bytes of *file*."""
    with open(file, 'rb') as f:
        data = f.read()
    return data
def speech2text(speech_data, token, dev_pid=1537):
    """Send base64-encoded WAV bytes to the Baidu ASR endpoint.

    Returns the first recognition candidate, or None (after printing a
    notice) when the response carries no 'result' key.
    dev_pid 1537 selects Mandarin with simple punctuation.
    """
    FORMAT = 'wav'
    RATE = '16000'
    CHANNEL = 1
    CUID = '*******'
    SPEECH = base64.b64encode(speech_data).decode('utf-8')
    data = {
        'format': FORMAT,
        'rate': RATE,
        'channel': CHANNEL,
        'cuid': CUID,
        'len': len(speech_data),
        'speech': SPEECH,
        'token': token,
        'dev_pid': dev_pid,
    }
    # NOTE(review): host prefix stripped in the source article; restore it.
    url = '/server_api'
    headers = {'Content-Type': 'application/json'}
    # r=requests.post(url,data=json.dumps(data),headers=headers)
    print('recognizing...')
    r = requests.post(url, json=data, headers=headers)
    Result = r.json()
    if 'result' in Result:
        return Result['result'][0]
    else:
        print('get nothing!')
def sigint_handler(signum, frame):
    """Ctrl-C handler: release audio and GPIO resources, then exit.

    NOTE(review): references globals ``stream`` and ``p`` that are never
    bound at module level in this script (``my_record`` keeps them local),
    so this handler would raise NameError if triggered -- confirm/fix.
    """
    stream.stop_stream()
    stream.close()
    p.terminate()
    GPIO.cleanup()
    print('catched interrupt signal!')
    import sys  # this script variant never imports sys at the top of the file
    sys.exit(0)
if __name__ == '__main__':
    flag = 'y'
    # Register the Ctrl-C interrupt handler.
    signal.signal(signal.SIGINT, sigint_handler)
    while flag.lower() == 'y':
        my_record()
        TOKEN = getToken(HOST)
        speech = get_audio(FILEPATH)
        words = speech2text(speech, TOKEN)  # dev_pid defaults to 1537
        print(words)
        # gbk encoding; also usable for opening specific pages, but Baidu
        # auto-search garbles it.
        result = words.encode('gbk')
        if ('开灯') in result:
            GPIO.output(11, GPIO.HIGH)
            GPIO.output(12, GPIO.HIGH)
            GPIO.output(13, GPIO.HIGH)
            print('turn on light')
        elif ('亮红灯') in result:
            GPIO.output(11, GPIO.HIGH)
            GPIO.output(12, GPIO.LOW)
            GPIO.output(13, GPIO.LOW)
            print('red light')
        elif ('关灯') in result:
            GPIO.output(11, GPIO.LOW)
            GPIO.output(12, GPIO.LOW)
            GPIO.output(13, GPIO.LOW)
            print('turn off light')
        elif ('放首歌') in result:
            print('you mei you na me yi shou ge!')
            os.system('mplayer song.mp3')
        else:
            print('please say words in: kai deng, liang hong deng, guan deng, fang shou ge')
        flag = raw_input('Continue?(y/n):\n')  # use input() on Python 3
    GPIO.cleanup()
二. 加了强制编码三句,结合图灵机器人,完成任务的如下(可控制灯,可播放本地音乐,可跟机器人聊天,显示汉字):
#!/usr/bin/env python
import wave
import requests
import time
import base64
from pyaudio import PyAudio, paInt16
import webbrowser
import os
import sys
# Force the Python 2 default encoding to UTF-8 so Chinese text round-trips
# without explicit encode/decode calls (the article's "three magic lines").
reload(sys)
sys.setdefaultencoding('utf-8')
import signal
import RPi.GPIO as GPIO
# BOARD numbering scheme: pins are addressed by their physical header position.
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11, GPIO.OUT)
GPIO.setup(12, GPIO.OUT)
GPIO.setup(13, GPIO.OUT)
# Start with all three LEDs off.
GPIO.output(11, GPIO.LOW)
GPIO.output(12, GPIO.LOW)
GPIO.output(13, GPIO.LOW)

framerate = 16000   # sampling rate (Hz)
num_samples = 2000  # samples read per buffer
sampwidth = 2       # sample width: 2 bytes (16-bit PCM)
channels = 1        # mono
FILEPATH = 'speech.wav'

# Baidu OAuth token endpoint -- NOTE(review): the host prefix was stripped when
# this article was published; restore the full URL before running.
base_url = "/oauth/2.0/token?grant_type=client_credentials&client_id=%s&client_secret=%s"
APIKey = "bn58jNgdjRED0uKCScW9R2C6"
SecretKey = "LgLYImS2Hcxqno6QQzIMrH7Gf8uRRbZb"
HOST = base_url % (APIKey, SecretKey)
def getToken(host):
    """POST to the Baidu OAuth endpoint and return the access token string."""
    res = requests.post(host)
    return res.json()['access_token']
def save_wave_file(filepath, data):
    """Write recorded PCM frames to *filepath* as a WAV file.

    Uses the module-level audio settings (channels, sampwidth, framerate).
    *data* is a list of byte chunks as returned by the PyAudio stream.
    """
    wf = wave.open(filepath, 'wb')
    wf.setnchannels(channels)
    wf.setsampwidth(sampwidth)
    wf.setframerate(framerate)
    wf.writeframes(b''.join(data))
    wf.close()
def my_record():
    """Record ~4 seconds of microphone audio and save it to FILEPATH."""
    pa = PyAudio()
    stream = pa.open(format=paInt16, channels=channels,
                     rate=framerate, input=True, frames_per_buffer=num_samples)
    my_buf = []
    t = time.time()
    print('record...')
    while time.time() < t + 4:  # record for 4 seconds
        string_audio_data = stream.read(num_samples)
        my_buf.append(string_audio_data)
    print('record over.')
    save_wave_file(FILEPATH, my_buf)
    stream.close()
def get_audio(file):
    """Return the raw bytes of *file*."""
    with open(file, 'rb') as f:
        data = f.read()
    return data
def speech2text(speech_data, token, dev_pid=1537):
    """Send base64-encoded WAV bytes to the Baidu ASR endpoint.

    Returns the first recognition candidate, or None (after printing a
    notice) when the response carries no 'result' key.
    dev_pid 1537 selects Mandarin with simple punctuation.
    """
    FORMAT = 'wav'
    RATE = '16000'
    CHANNEL = 1
    CUID = '*******'
    SPEECH = base64.b64encode(speech_data).decode('utf-8')
    data = {
        'format': FORMAT,
        'rate': RATE,
        'channel': CHANNEL,
        'cuid': CUID,
        'len': len(speech_data),
        'speech': SPEECH,
        'token': token,
        'dev_pid': dev_pid,
    }
    # NOTE(review): host prefix stripped in the source article; restore it.
    url = '/server_api'
    headers = {'Content-Type': 'application/json'}
    # r=requests.post(url,data=json.dumps(data),headers=headers)
    print('recognizing...')
    r = requests.post(url, json=data, headers=headers)
    Result = r.json()
    if 'result' in Result:
        return Result['result'][0]
    else:
        print('get nothing!')
def sigint_handler(signum, frame):
    """Ctrl-C handler: release audio and GPIO resources, then exit.

    NOTE(review): references globals ``stream`` and ``p`` that are never
    bound at module level in this script (``my_record`` keeps them local),
    so this handler would raise NameError if triggered -- confirm/fix.
    """
    stream.stop_stream()
    stream.close()
    p.terminate()
    GPIO.cleanup()
    print('catched interrupt signal!')
    sys.exit(0)
if __name__ == '__main__':
    flag = 'y'
    # Register the Ctrl-C interrupt handler.
    signal.signal(signal.SIGINT, sigint_handler)
    while flag.lower() == 'y':
        print('请在以下内容中选说:开灯,亮红灯,关灯,放首歌,英文歌')
        my_record()
        TOKEN = getToken(HOST)
        speech = get_audio(FILEPATH)
        result = speech2text(speech, TOKEN)  # dev_pid defaults to 1537
        print(result)
        if ('开灯') in result:
            GPIO.output(11, GPIO.HIGH)
            GPIO.output(12, GPIO.HIGH)
            GPIO.output(13, GPIO.HIGH)
            print('turn on light')
        elif ('亮红灯') in result:
            GPIO.output(11, GPIO.HIGH)
            GPIO.output(12, GPIO.LOW)
            GPIO.output(13, GPIO.LOW)
            print('red light')
        elif ('关灯') in result:
            GPIO.output(11, GPIO.LOW)
            GPIO.output(12, GPIO.LOW)
            GPIO.output(13, GPIO.LOW)
            print('turn off light')
        elif ('放首歌') in result:
            print('周华健的有没有那么一首歌')
            os.system('mplayer song.mp3')
        elif ('英文歌') in result:
            print('Carpenters - Yesterday Once More')
            os.system('mplayer yesterday.mp3')
        else:
            print('图灵机器人接管服务')
        flag = raw_input('Continue?(y/n):\n')  # use input() on Python 3
    GPIO.cleanup()
三. 因为图灵机器人接收和返回的都是文字,所以以上版本的返回(除了音乐)都是文字,把文字快速在本地转成声音并播放(不用百度语音合成,因语音合成很成熟很简单,百度合成还需要再上网,慢)代码如下:
#!/usr/bin/env python
import wave
import requests
import time
import urllib
import json
import base64
from pyaudio import PyAudio, paInt16
import webbrowser
import os
import sys
# Force the Python 2 default encoding to UTF-8 so Chinese text round-trips
# without explicit encode/decode calls (the article's "three magic lines").
reload(sys)
sys.setdefaultencoding('utf-8')
import signal
import RPi.GPIO as GPIO
# BOARD numbering scheme: pins are addressed by their physical header position.
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11, GPIO.OUT)
GPIO.setup(12, GPIO.OUT)
GPIO.setup(13, GPIO.OUT)
# Start with all three LEDs off.
GPIO.output(11, GPIO.LOW)
GPIO.output(12, GPIO.LOW)
GPIO.output(13, GPIO.LOW)

framerate = 16000   # sampling rate (Hz)
num_samples = 2000  # samples read per buffer
sampwidth = 2       # sample width: 2 bytes (16-bit PCM)
channels = 1        # mono
FILEPATH = 'speech.wav'

# Baidu OAuth token endpoint -- NOTE(review): the host prefix was stripped when
# this article was published; restore the full URL before running.
base_url = "/oauth/2.0/token?grant_type=client_credentials&client_id=%s&client_secret=%s"
APIKey = "bn58jNgdjRED0uKCScW9R2C6"
SecretKey = "LgLYImS2Hcxqno6QQzIMrH7Gf8uRRbZb"
HOST = base_url % (APIKey, SecretKey)
def getToken(host):
    """POST to the Baidu OAuth endpoint and return the access token string."""
    res = requests.post(host)
    return res.json()['access_token']
def save_wave_file(filepath, data):
    """Write recorded PCM frames to *filepath* as a WAV file.

    Uses the module-level audio settings (channels, sampwidth, framerate).
    *data* is a list of byte chunks as returned by the PyAudio stream.
    """
    wf = wave.open(filepath, 'wb')
    wf.setnchannels(channels)
    wf.setsampwidth(sampwidth)
    wf.setframerate(framerate)
    wf.writeframes(b''.join(data))
    wf.close()
def my_record():
    """Record ~4 seconds of microphone audio and save it to FILEPATH."""
    pa = PyAudio()
    stream = pa.open(format=paInt16, channels=channels,
                     rate=framerate, input=True, frames_per_buffer=num_samples)
    my_buf = []
    t = time.time()
    print('record...')
    while time.time() < t + 4:  # record for 4 seconds
        string_audio_data = stream.read(num_samples)
        my_buf.append(string_audio_data)
    print('record over.')
    save_wave_file(FILEPATH, my_buf)
    stream.close()
def get_audio(file):
    """Return the raw bytes of *file*."""
    with open(file, 'rb') as f:
        data = f.read()
    return data
def speech2text(speech_data, token, dev_pid=1537):
    """Send base64-encoded WAV bytes to the Baidu ASR endpoint.

    Returns the first recognition candidate, or None (after printing a
    notice) when the response carries no 'result' key.
    dev_pid 1537 selects Mandarin with simple punctuation.
    """
    FORMAT = 'wav'
    RATE = '16000'
    CHANNEL = 1
    CUID = '*******'
    SPEECH = base64.b64encode(speech_data).decode('utf-8')
    data = {
        'format': FORMAT,
        'rate': RATE,
        'channel': CHANNEL,
        'cuid': CUID,
        'len': len(speech_data),
        'speech': SPEECH,
        'token': token,
        'dev_pid': dev_pid,
    }
    # NOTE(review): host prefix stripped in the source article; restore it.
    url = '/server_api'
    headers = {'Content-Type': 'application/json'}
    # r=requests.post(url,data=json.dumps(data),headers=headers)
    print('recognizing...')
    r = requests.post(url, json=data, headers=headers)
    Result = r.json()
    if 'result' in Result:
        return Result['result'][0]
    else:
        print('get nothing!')
def sigint_handler(signum, frame):
    """Ctrl-C handler: release audio and GPIO resources, then exit.

    NOTE(review): references globals ``stream`` and ``p`` that are never
    bound at module level in this script (``my_record`` keeps them local),
    so this handler would raise NameError if triggered -- confirm/fix.
    """
    stream.stop_stream()
    stream.close()
    p.terminate()
    GPIO.cleanup()
    print('catched interrupt signal!')
    sys.exit(0)
def getHtml(url):
    """Fetch *url* and return the raw response body (Python 2 urllib API)."""
    page = urllib.urlopen(url)
    html = page.read()
    return html
# Turing robot API key and query URL prefix -- NOTE(review): host prefix was
# stripped when the article was published; restore it before running.
key = '05ba411481c8cfa61b91124ef7389767'
api = '/openapi/api?key=' + key + '&info='
if __name__ == '__main__':
    flag = 'y'
    # Register the Ctrl-C interrupt handler.
    signal.signal(signal.SIGINT, sigint_handler)
    while flag.lower() == 'y':
        print('请在以下内容中选说:开灯,亮红灯,关灯,放首歌,英文歌')
        my_record()
        TOKEN = getToken(HOST)
        speech = get_audio(FILEPATH)
        result = speech2text(speech, TOKEN)  # dev_pid defaults to 1537
        print(result)
        if ('开灯') in result:
            GPIO.output(11, GPIO.HIGH)
            GPIO.output(12, GPIO.HIGH)
            GPIO.output(13, GPIO.HIGH)
            print('turn on light')
        elif ('亮红灯') in result:
            GPIO.output(11, GPIO.HIGH)
            GPIO.output(12, GPIO.LOW)
            GPIO.output(13, GPIO.LOW)
            print('red light')
        elif ('关灯') in result:
            GPIO.output(11, GPIO.LOW)
            GPIO.output(12, GPIO.LOW)
            GPIO.output(13, GPIO.LOW)
            print('turn off light')
        elif ('放首歌') in result:
            print('周华健的有没有那么一首歌')
            os.system('mplayer song.mp3')
        elif ('英文歌') in result:
            print('Carpenters - Yesterday Once More')
            os.system('mplayer yesterday.mp3')
        else:
            # Unrecognized command: let the Turing chatbot answer instead.
            print('图灵机器人接管服务')
            request = api + str(result)
            response = getHtml(request)
            dic_json = json.loads(response)
            print('机器人: '.decode('utf-8') + dic_json['text'])
            a = dic_json['text']
            print(type(a))
            # Save the reply text to Voicecontrol3.txt.
            f = open("Voicecontrol3.txt", 'w')
            f.truncate()
            f.write(a)
            f.close()
            # Synthesize the reply with ekho and play the resulting WAV.
            os.system('ekho -f Voicecontrol3.txt -o Voicecontrol3.wav')
            os.system('mplayer Voicecontrol3.wav')
        flag = raw_input('Continue?(y/n):\n')  # use input() on Python 3
    GPIO.cleanup()