Python调⽤百度api实现语⾳识别详解
最近在学习python,做⼀些python练习题
有⼀题是这样的:
使⽤ Python 实现:对着电脑吼⼀声,⾃动打开浏览器中的默认⽹站。
例如,对着笔记本电脑吼⼀声“百度”,浏览器⾃动打开百度⾸页。
然后开始search相应的功能需要的模块(windows10),理⼀下思路:
1. 本地录⾳
2. 上传录⾳,获得返回结果
3. 组⼀个map,根据结果打开相应的⽹页
所需模块:
1. PyAudio:录⾳接⼝
2. wave:打开录⾳⽂件并设置⾳频参数
3. requests:GET/POST
为什么要⽤百度语⾳识别api呢?因为免费试⽤。。
不多说,登录百度云,创建应⽤
查看⽂档
⽂档写的蛮详细的,简单概括就是
1.可以下载使⽤SDK
2.不需要下载使⽤SDK
选择2.
1. 根据⽂档组装url获取token
2. 处理本地⾳频以JSON格式POST到百度语⾳识别服务器,获得返回结果
语⾳格式
格式⽀持:pcm(不压缩)、wav(不压缩,pcm编码)、amr(压缩格式)。推荐pcm 采样率:16000 固定值。编码:16bit 位深的单声道。
百度服务端会将⾮pcm格式,转为pcm格式,因此使⽤wav、amr会有额外的转换耗时。
保存为pcm格式可以识别,只是windows⾃带播放器识别不了pcm格式的,所以改⽤wav格式,毕竟⽤的模块是wave?
⾸先是本地录⾳
import wave
from pyaudio import PyAudio, paInt16
framerate = 16000 # sample rate (Hz) — fixed value required by the Baidu ASR API
num_samples = 2000 # frames read from the stream per buffer
channels = 1 # mono, as the API expects
sampwidth = 2 # sample width in bytes (16-bit PCM)
FILEPATH = 'speech.wav'
def save_wave_file(filepath, data, n_channels=None, samp_width=None, frame_rate=None):
    """Write raw PCM frames to *filepath* as a WAV file.

    data is a sequence of bytes objects (the chunks recorded from the
    stream); they are concatenated before writing.  The audio parameters
    fall back to the module-level recording constants when not given, so
    existing callers are unaffected.
    """
    if n_channels is None:
        n_channels = channels
    if samp_width is None:
        samp_width = sampwidth
    if frame_rate is None:
        frame_rate = framerate
    # context manager guarantees the file is closed even if a write fails
    with wave.open(filepath, 'wb') as wf:
        wf.setnchannels(n_channels)
        wf.setsampwidth(samp_width)
        wf.setframerate(frame_rate)
        wf.writeframes(b''.join(data))
#录⾳
import time  # needed for the timed recording loop below

def my_record():
    """Record ~4 seconds of 16 kHz mono audio from the default input
    device and save it to FILEPATH as a WAV file.
    """
    pa = PyAudio()
    # open a new input stream with the module-level audio parameters
    stream = pa.open(format=paInt16, channels=channels,
                     rate=framerate, input=True,
                     frames_per_buffer=num_samples)
    my_buf = []  # recorded chunks
    t = time.time()
    print('正在录⾳...')
    try:
        while time.time() < t + 4:  # record for 4 seconds
            # Bug fix: the OCR'd original read `ad(num_samples)` — the
            # call is stream.read(), num_samples frames per iteration.
            my_buf.append(stream.read(num_samples))
    finally:
        # always release the audio device, even if read() raises
        stream.stop_stream()
        stream.close()
        pa.terminate()
    print('录⾳结束.')
    save_wave_file(FILEPATH, my_buf)
然后是获取token
import requests
import base64 #百度语⾳要求对本地语⾳⼆进制数据进⾏base64编码
# Assemble the token-request URL (see the Baidu OAuth documentation).
# Fix: the OCR'd original fused base_url and APIKey onto one line (a
# syntax error) and dropped the URL's scheme and ".com" suffix.
base_url = "https://openapi.baidu.com/oauth/2.0/token?grant_type=client_credentials&client_id=%s&client_secret=%s"
APIKey = "LZAdqHUGC********mbfKm"
SecretKey = "WYPPwgHu********BU6GM*****"
HOST = base_url % (APIKey, SecretKey)
def getToken(host):
    """POST to the assembled OAuth URL and return the access token."""
    token_info = requests.post(host).json()
    return token_info['access_token']
#传⼊语⾳⼆进制数据,token
#dev_pid为百度语⾳识别提供的⼏种语⾔选择
def speech2text(speech_data, token, dev_pid=1537):
    """POST the recorded audio to Baidu's speech-recognition REST API.

    speech_data: raw bytes of the WAV file recorded by my_record().
    token:       OAuth access token obtained via getToken().
    dev_pid:     Baidu language-model id (1537 = Mandarin with punctuation).

    Returns the first recognized sentence (str) on success; on failure
    returns the raw JSON error response (dict) so the caller can inspect it.
    """
    FORMAT = 'wav'
    RATE = '16000'  # must match the recording sample rate
    CHANNEL = 1
    CUID = '********'
    # Baidu requires the audio bytes to be base64-encoded in the JSON body
    SPEECH = base64.b64encode(speech_data).decode('utf-8')
    data = {
        'format': FORMAT,
        'rate': RATE,
        'channel': CHANNEL,
        'cuid': CUID,
        'len': len(speech_data),  # length of the raw (pre-base64) data
        'speech': SPEECH,
        'token': token,
        'dev_pid': dev_pid,
    }
    # Bug fix: the OCR'd original read 'vop.baidu/server_api', which is
    # not a valid URL; this is the documented recognition endpoint.
    url = 'http://vop.baidu.com/server_api'
    headers = {'Content-Type': 'application/json'}
    print('正在识别...')
    r = requests.post(url, json=data, headers=headers)
    result = r.json()
    if 'result' in result:
        return result['result'][0]
    return result
最后就是对返回的结果进⾏匹配,这⾥使⽤webbrowser这个模块
webbrowser.open(url)
完整demo
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Date : 2018-12-02 19:04:55
import wave
import requests
import time
import base64
from pyaudio import PyAudio, paInt16
import webbrowser
framerate = 16000   # sample rate (Hz) — fixed value required by the Baidu ASR API
num_samples = 2000  # frames read from the stream per buffer
channels = 1        # mono, as the API expects
sampwidth = 2       # sample width in bytes (16-bit PCM)
FILEPATH = 'speech.wav'
# Fix: the OCR'd original fused base_url and APIKey onto one line (a
# syntax error) and dropped the URL's scheme and ".com" suffix.
base_url = "https://openapi.baidu.com/oauth/2.0/token?grant_type=client_credentials&client_id=%s&client_secret=%s"
APIKey = "********"
SecretKey = "************"
HOST = base_url % (APIKey, SecretKey)
def getToken(host):
    """POST to the assembled OAuth URL and return the access token."""
    response = requests.post(host)
    payload = response.json()
    return payload['access_token']
def save_wave_file(filepath, data, n_channels=None, samp_width=None, frame_rate=None):
    """Write raw PCM frames to *filepath* as a WAV file.

    data is a sequence of bytes objects (the chunks recorded from the
    stream); they are concatenated before writing.  The audio parameters
    fall back to the module-level recording constants when not given, so
    existing callers are unaffected.
    """
    if n_channels is None:
        n_channels = channels
    if samp_width is None:
        samp_width = sampwidth
    if frame_rate is None:
        frame_rate = framerate
    # context manager guarantees the file is closed even if a write fails
    with wave.open(filepath, 'wb') as wf:
        wf.setnchannels(n_channels)
        wf.setsampwidth(samp_width)
        wf.setframerate(frame_rate)
        wf.writeframes(b''.join(data))
def my_record():
    """Record ~4 seconds of 16 kHz mono audio from the default input
    device and save it to FILEPATH as a WAV file.
    """
    pa = PyAudio()
    stream = pa.open(format=paInt16, channels=channels,
                     rate=framerate, input=True,
                     frames_per_buffer=num_samples)
    my_buf = []  # recorded chunks
    t = time.time()
    print('正在录⾳...')
    try:
        while time.time() < t + 4:  # record for 4 seconds
            # Bug fix: the OCR'd original read `ad(num_samples)` — the
            # call is stream.read(), num_samples frames per iteration.
            my_buf.append(stream.read(num_samples))
    finally:
        # always release the audio device, even if read() raises
        stream.stop_stream()
        stream.close()
        pa.terminate()
    print('录⾳结束.')
    save_wave_file(FILEPATH, my_buf)
def get_audio(file):
    """Return the raw bytes of the recorded audio file at *file*."""
    with open(file, 'rb') as audio_file:
        return audio_file.read()
def speech2text(speech_data, token, dev_pid=1537):
    """POST the recorded audio to Baidu's speech-recognition REST API.

    speech_data: raw bytes of the WAV file recorded by my_record().
    token:       OAuth access token obtained via getToken().
    dev_pid:     Baidu language-model id (1537 = Mandarin with punctuation).

    Returns the first recognized sentence (str) on success; on failure
    returns the raw JSON error response (dict) so the caller can inspect it.
    """
    FORMAT = 'wav'
    RATE = '16000'  # must match the recording sample rate
    CHANNEL = 1
    CUID = '*******'
    # Baidu requires the audio bytes to be base64-encoded in the JSON body
    SPEECH = base64.b64encode(speech_data).decode('utf-8')
    data = {
        'format': FORMAT,
        'rate': RATE,
        'channel': CHANNEL,
        'cuid': CUID,
        'len': len(speech_data),  # length of the raw (pre-base64) data
        'speech': SPEECH,
        'token': token,
        'dev_pid': dev_pid,
    }
    # Bug fix: the OCR'd original read 'vop.baidu/server_api', which is
    # not a valid URL; this is the documented recognition endpoint.
    url = 'http://vop.baidu.com/server_api'
    headers = {'Content-Type': 'application/json'}
    print('正在识别...')
    r = requests.post(url, json=data, headers=headers)
    result = r.json()
    if 'result' in result:
        return result['result'][0]
    return result
def openbrowser(text):
    """Open the website matching the recognized *text* in a new browser
    tab; fall back to a Baidu search for anything unrecognized.
    """
    # aliases the recognizer may return for each site.
    # Fix: the OCR used the Kangxi-radical character ⽹ (U+2F79) instead of
    # 网, so recognized text could never match — restored the real character.
    maps = {
        '百度': ['百度', 'baidu'],
        '腾讯': ['腾讯', 'tengxun'],
        '网易': ['网易', 'wangyi']
    }
    # Fix: the OCR'd original dropped the scheme and ".com" suffix from
    # every URL (e.g. 'www.baidu'); restored to full, openable URLs.
    if text in maps['百度']:
        webbrowser.open_new_tab('https://www.baidu.com')
    elif text in maps['腾讯']:
        webbrowser.open_new_tab('https://www.qq.com')
    elif text in maps['网易']:
        webbrowser.open_new_tab('https://www.163.com/')
    else:
        webbrowser.open_new_tab('https://www.baidu.com/s?wd=%s' % text)
if __name__ == '__main__':
    flag = 'y'
    while flag.lower() == 'y':
        print('请输⼊数字选择语⾔:')
        devpid = input('1536:普通话(简单英⽂),1537:普通话(有标点),1737:英语,1637:粤语,1837:四川话\n')
        # Robustness: fall back to Mandarin-with-punctuation instead of
        # crashing with ValueError on non-numeric input.
        try:
            dev_pid = int(devpid)
        except ValueError:
            dev_pid = 1537
        my_record()
        TOKEN = getToken(HOST)
        speech = get_audio(FILEPATH)
        result = speech2text(speech, TOKEN, dev_pid)
        print(result)
        # speech2text returns a dict on recognition failure; only open a
        # browser for a successful (string) result
        if isinstance(result, str):
            openbrowser(result.strip(','))
        flag = input('Continue?(y/n):')
经测试,⼤吼效果更佳
到此这篇关于Python调⽤百度api实现语⾳识别详解的⽂章就介绍到这了,更多相关Python 语⾳识别内容请搜索以前的⽂章或继续浏览下⾯的相关⽂章希望⼤家以后多多⽀持!
版权声明:本站内容均来自互联网,仅供演示用,请勿用于商业和其他非法用途。如果侵犯了您的权益请与我们联系QQ:729038198,我们将在24小时内删除。
发表评论