I have recently been working on a speech recognition front end built on Qt; here are a few notes on it.

First, the speech recognition flow does three things:

1. Record the user's speech to a local audio file

2. Encode the recorded audio, using FLAC or Speex

3. Send the encoded audio to a third-party speech recognition API or SDK for recognition

For now the approach is fairly simple: the recorded WAV file is encoded to FLAC.

The code targets both Mac OS X and Windows 7.

On Windows 7 the flac.exe command-line encoder is used; for the detailed options you can run flac.exe --help > help.txt to redirect the help text into a file for easy reference.

On Mac OS X, installing the flac .dmg package makes the flac command available.
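Since the encoder is an external program on both platforms, it is worth checking that it is actually reachable before recording. A minimal sketch, assuming the same paths that the widget code below uses (the helper names here are illustrative, not part of the project):

#include <QFile>
#include <QString>

// Illustrative helpers only: resolve and verify the external flac encoder.
static QString flacProgramPath()
{
#ifdef Q_OS_WIN
    return QString("./lib/flac.exe");      // shipped next to the application on Windows 7
#else
    return QString("/usr/local/bin/flac"); // installed from the flac .dmg on Mac OS X
#endif
}

static bool flacEncoderAvailable()
{
    return QFile::exists(flacProgramPath());
}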

Let's first look at audio capture.

Qt ships with an audio module (QtMultimedia) that provides QAudioInput for capturing raw PCM from the microphone; in a qmake project this typically means adding QT += multimedia to the .pro file (plus QT += network for the HTTP request used later).
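Before starting a recording it also helps to confirm that the default input device supports the format we want. This is the same check that initializeAudioInput() performs further down; shown here in isolation as a sketch:

#include <QAudioDeviceInfo>
#include <QAudioFormat>
#include <QDebug>

// Sketch: verify that 16 kHz / 16-bit mono PCM (the format used throughout
// this post) is supported by the default input device.
bool inputFormatSupported()
{
    QAudioFormat fmt;
    fmt.setSampleRate(16000);
    fmt.setChannelCount(1);
    fmt.setSampleSize(16);
    fmt.setSampleType(QAudioFormat::SignedInt);
    fmt.setByteOrder(QAudioFormat::LittleEndian);
    fmt.setCodec("audio/pcm");

    QAudioDeviceInfo dev = QAudioDeviceInfo::defaultInputDevice();
    if (!dev.isFormatSupported(fmt)) {
        qDebug() << "16 kHz mono PCM not supported, nearest format:" << dev.nearestFormat(fmt);
        return false;
    }
    return true;
}

The full capture implementation follows.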

/*
 * Based on Qt Example
 * PCM2WAV is not mine, I found it in Google and modified it.
 */
#include "speechInput.h"
#include <QtEndian>
#include <QDebug>
#include <QPainter>

WavPcmFile::WavPcmFile(const QString & name, const QAudioFormat & format_, QObject *parent_)
: QFile(name, parent_), format(format_)
{
}

bool WavPcmFile::hasSupportedFormat()
{
return (format.sampleSize() == 8
&& format.sampleType() == QAudioFormat::UnSignedInt)
|| (format.sampleSize() > 8
&& format.sampleType() == QAudioFormat::SignedInt
&& format.byteOrder() == QAudioFormat::LittleEndian);
}

bool WavPcmFile::open()
{
if (!hasSupportedFormat()) {
setErrorString("Wav PCM supports only 8-bit unsigned samples "
"or 16-bit (or more) signed samples (in little endian)");
return false;
} else {
if (!QFile::open(ReadWrite | Truncate))
return false;
writeHeader();
return true;
}
}

void WavPcmFile::writeHeader()
{
QDataStream out(this);
out.setByteOrder(QDataStream::LittleEndian);

// RIFF chunk
out.writeRawData("RIFF", 4);
out << quint32(0); // Placeholder for the RIFF chunk size (filled by close())
out.writeRawData("WAVE", 4);

// Format description chunk
out.writeRawData("fmt ", 4);
out << quint32(16); // "fmt " chunk size (always 16 for PCM)
out << quint16(1); // data format (1 => PCM)
out << quint16(format.channelCount());
out << quint32(format.sampleRate());
out << quint32(format.sampleRate() * format.channelCount()
* format.sampleSize() / 8); // bytes per second
out << quint16(format.channelCount() * format.sampleSize() / 8); // Block align
out << quint16(format.sampleSize()); // Significant bits per sample

// Data chunk
out.writeRawData("data", 4);
out << quint32(0); // Placeholder for the data chunk size (filled by close())

Q_ASSERT(pos() == 44); // Must be 44 for WAV PCM
}

void WavPcmFile::close()
{
// Fill the header size placeholders
quint32 fileSize = size();
QDataStream out(this);
out.setByteOrder(QDataStream::LittleEndian); // QDataStream defaults to big-endian; without this the sizes would be written byte-swapped
// RIFF chunk size
seek(4);
out << quint32(fileSize - 8);
// data chunk size
seek(40);
out << quint32(fileSize - 44);
QFile::close();
}

AudioInfo::AudioInfo(const QAudioFormat &format, QObject *parent, const QString &filename)
: QIODevice(parent)
, m_format(format)
, m_maxAmplitude(0)
, m_level(0.0)
{
switch (m_format.sampleSize()) {
case 8:
switch (m_format.sampleType()) {
case QAudioFormat::UnSignedInt:
m_maxAmplitude = 255;
break;
case QAudioFormat::SignedInt:
m_maxAmplitude = 127;
break;
default:
break;
}
break;
case 16:
switch (m_format.sampleType()) {
case QAudioFormat::UnSignedInt:
m_maxAmplitude = 65535;
break;
case QAudioFormat::SignedInt:
m_maxAmplitude = 32767;
break;
default:
break;
}
break;
default:
break;
}

m_file = new WavPcmFile(filename, format, this);
}

AudioInfo::~AudioInfo()
{
}

void AudioInfo::start()
{
m_file->open();
open(QIODevice::WriteOnly);
}

void AudioInfo::stop()
{
close();
m_file->close();
}

qint64 AudioInfo::readData(char *data, qint64 maxlen)
{
Q_UNUSED(data)
Q_UNUSED(maxlen)
return 0;
}

qint64 AudioInfo::writeData(const char *data, qint64 len)
{
if (m_maxAmplitude) {
Q_ASSERT(m_format.sampleSize() % 8 == 0);
const int channelBytes = m_format.sampleSize() / 8;
const int sampleBytes = m_format.channelCount() * channelBytes;
Q_ASSERT(len % sampleBytes == 0);
const int numSamples = len / sampleBytes;
quint16 maxValue = 0;
const unsigned char *ptr = reinterpret_cast<const unsigned char *>(data);

for (int i = 0; i < numSamples; ++i) {
for (int j = 0; j < m_format.channelCount(); ++j) {
quint16 value = 0;
if (m_format.sampleSize() == 8 && m_format.sampleType() == QAudioFormat::UnSignedInt) {
value = *reinterpret_cast<const quint8*>(ptr);
} else if (m_format.sampleSize() == 8 && m_format.sampleType() == QAudioFormat::SignedInt) {
value = qAbs(*reinterpret_cast<const qint8*>(ptr));
} else if (m_format.sampleSize() == 16 && m_format.sampleType() == QAudioFormat::UnSignedInt) {
if (m_format.byteOrder() == QAudioFormat::LittleEndian)
value = qFromLittleEndian<quint16>(ptr);
else
value = qFromBigEndian<quint16>(ptr);
} else if (m_format.sampleSize() == 16 && m_format.sampleType() == QAudioFormat::SignedInt) {
if (m_format.byteOrder() == QAudioFormat::LittleEndian)
value = qAbs(qFromLittleEndian<qint16>(ptr));
else
value = qAbs(qFromBigEndian<qint16>(ptr));
}

maxValue = qMax(value, maxValue);
ptr += channelBytes;
}
}

maxValue = qMin(maxValue, m_maxAmplitude);
m_level = qreal(maxValue) / m_maxAmplitude;
}

m_file->write(data, len);
emit update();
return len;
}

RenderArea::RenderArea(QWidget *parent)
: QPushButton(parent)
{
setBackgroundRole(QPalette::Base);
setAutoFillBackground(true);
m_level = 0;
setMinimumHeight(30);
setMinimumWidth(80);
}

void RenderArea::paintEvent(QPaintEvent * /* event */)
{
QPainter painter(this);
QPixmap pixmap = QPixmap(":/images/button_default.png").scaled(this->size());
painter.drawPixmap(this->rect(), pixmap);
// painter.setPen(Qt::black);
// painter.drawRect(QRect(painter.viewport().left(),
// painter.viewport().top(),
// painter.viewport().right()-20,
// painter.viewport().bottom()-20));
if (m_level == 0.0)
return;
painter.setPen(Qt::darkGray);
int pos = ((painter.viewport().right()-20)-(painter.viewport().left()+11))*m_level;
for (int i = 0; i < 10; ++i) {
int x1 = painter.viewport().left()+11;
int y1 = painter.viewport().top()+10+i;
int x2 = painter.viewport().left()+20+pos;
int y2 = painter.viewport().top()+10+i;
if (x2 < painter.viewport().left()+10)
x2 = painter.viewport().left()+10;
painter.drawLine(QPoint(x1+10, y1+10), QPoint(x2+10, y2+10));
}
}

void RenderArea::setLevel(qreal value)
{
m_level = value;
repaint();
}

This file contains three classes.

The first two (WavPcmFile and AudioInfo) are responsible for writing the WAV file.

The last class, RenderArea, is a widget that visualizes the audio level while recording.
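To see how these classes fit together before diving into the full widget code, here is a minimal sketch (the function and file names are illustrative; fmt is a supported PCM format such as the 16 kHz mono one checked earlier):

#include <QAudioInput>
#include "speechInput.h"

// Sketch: AudioInfo is the QIODevice sink that QAudioInput streams PCM into;
// internally it forwards the data to a WavPcmFile.
void startCapture(const QAudioFormat &fmt, QObject *parent)
{
    AudioInfo *sink = new AudioInfo(fmt, parent, "capture.wav"); // WavPcmFile writes the 44-byte header
    sink->start();                                               // open the file and the QIODevice

    QAudioInput *input = new QAudioInput(fmt, parent);
    input->start(sink); // PCM frames now flow into AudioInfo::writeData()
}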

mainWidget drives the speech recognition workflow (recording, encoding, and sending the request).

#include "widget.h"
#include <QApplication>
mainWidget::mainWidget(QWidget *parent)
: QWidget(parent),curI(0),canmove(false),speechInput_AudioInput(NULL),
flacEncoder(NULL)
{
this->setFixedSize(440,300);
this->setWindowFlags(Qt::FramelessWindowHint);
curPath = QApplication::applicationDirPath();
curPath.chop(30);
frame_Speech = new QPushButton(this);
connect(frame_Speech,SIGNAL(clicked()),this,SLOT(pushButton_Speech_clicked()));
frame_Speech->resize(100,50);
frame_Speech->move(50,110);
frame_Speech->setStyleSheet("QPushButton {\
color: grey;\
image: url(:/images/speech_n.png) ;\
image-position: right center;\
border-image: url(:/images/button_default.png) 3 10 3 10;\
border-top: 3px transparent;\
border-bottom: 3px transparent;\
border-right: 10px transparent;\
border-left: 10px transparent;\
}"\
"QPushButton:pressed {\
image: url(:/images/speech_p.png);\
image-position: right center;\
border-image: url(:/images/button_press.png) 3 10 3 10;\
border-top: 3px transparent;\
border-bottom: 3px transparent;\
border-right: 10px transparent;\
border-left: 10px transparent;\
}"\
"QPushButton:hover {\
color:grey;\
image: url(:/images/speech_p.png);\
image-position: right center;\
border-image: url(:/images/button_press.png) 3 10 3 10;\
border-top: 3px transparent;\
border-bottom: 3px transparent;\
border-right: 10px transparent;\
border-left: 10px transparent;\
}");
qDebug()<<"11111111111"<<endl;
cancel_btn = new QPushButton(this);
connect(cancel_btn,SIGNAL(clicked()),this,SLOT(pushButton_SpeechCancel_clicked()));
cancel_btn->resize(100,50);
cancel_btn->move(50,110);
cancel_btn->setText(QString::fromUtf8("取消录音"));
cancel_btn->setStyleSheet("QPushButton {\
color: grey;\
border-image: url(:/images/button_default.png) 3 10 3 10;\
border-top: 3px transparent;\
border-bottom: 3px transparent;\
border-right: 10px transparent;\
border-left: 10px transparent;\
}"\
"QPushButton:pressed {\
color: grey;\
border-image: url(:/images/button_press.png) 3 10 3 10;\
border-top: 3px transparent;\
border-bottom: 3px transparent;\
border-right: 10px transparent;\
border-left: 10px transparent;\
}"\
"QPushButton:hover {\
color:grey;\
image-position: right center;\
border-image: url(:/images/button_press.png) 3 10 3 10;\
border-top: 3px transparent;\
border-bottom: 3px transparent;\
border-right: 10px transparent;\
border-left: 10px transparent;\
}");
cancel_btn->setEnabled(false);
cancel_btn->hide();
qDebug()<<"222222222222"<<endl;
textEdit_Input = new QTextEdit(this);
textEdit_Input->resize(180,200);
textEdit_Input->move(230,50);
textEdit_Input->setStyleSheet("QTextEdit {\
border: 2px solid gray;\
border-radius: 5px;\
padding: 0 8px;\
background: \"#649f12\";\
selection-background-color: darkgray;\
}"\
"QScrollBar:vertical {\
border: 1px solid grey;\
background: #32CC99;\
width: 13px;\
margin: 22px 0 22px 0;\
}"\
"QScrollBar::handle:vertical {\
background: darkgray;\
min-height: 20px;\
}"\
"QScrollBar::add-line:vertical {\
border: 2px solid grey;\
background: #32CC99;\
height: 20px;\
subcontrol-position: bottom;\
subcontrol-origin: margin;\
}"\
"QScrollBar::sub-line:vertical {\
border: 2px solid grey;\
background: #32CC99;\
height: 20px;\
subcontrol-position: top;\
subcontrol-origin: margin;\
}"\
"QScrollBar::up-arrow:vertical, QScrollBar::down-arrow:vertical {\
border: 2px solid grey;\
width: 3px;\
height: 3px;\
background: darkgray;\
}"\
"QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {\
background: none;\
}"); qDebug()<<"33333333333"<<endl;
label_Speech_Waiting = new QPushButton(this);
//connect(label_Speech_Waiting,SIGNAL(clicked()),this,SLOT(pushButton_SpeechCancel_clicked()));
label_Speech_Waiting->resize(100,50);
label_Speech_Waiting->move(50,110);
label_Speech_Waiting->setText(QString::fromUtf8("智能识别"));
label_Speech_Waiting->setStyleSheet("QPushButton {\
color: grey;\
border-image: url(:/images/button_default.png) 3 10 3 10;\
border-top: 3px transparent;\
border-bottom: 3px transparent;\
border-right: 10px transparent;\
border-left: 10px transparent;\
}"\
"QPushButton:pressed {\
color: grey;\
border-image: url(:/images/button_press.png) 3 10 3 10;\
border-top: 3px transparent;\
border-bottom: 3px transparent;\
border-right: 10px transparent;\
border-left: 10px transparent;\
}"\
"QPushButton:hover {\
color: grey;\
border-image: url(:/images/button_press.png) 3 10 3 10;\
border-top: 3px transparent;\
border-bottom: 3px transparent;\
border-right: 10px transparent;\
border-left: 10px transparent;\
}");
label_Speech_Waiting->hide();
protocol = new Protocol();
connect(protocol,SIGNAL(Signal_SPEECH(int,QString,double)),this,SLOT(slotGoogleApiData(int,QString,double)));

speechArea = new RenderArea(this);
speechArea->resize(100,50);
speechArea->move(50,200);
speechArea->hide();

close_btn = new QPushButton(this);
connect(close_btn,SIGNAL(clicked()),this,SLOT(pushButton_Close_Clicked()));
close_btn->resize(25,25);
close_btn->move(this->width()-40,10);
close_btn->setStyleSheet("QPushButton {\
color: grey;\
border-image: url(:/images/close_d.png) 3 10 3 10;\
border-top: 3px transparent;\
border-bottom: 3px transparent;\
border-right: 10px transparent;\
border-left: 10px transparent;\
}"\
"QPushButton:pressed {\
color: grey;\
border-image: url(:/images/close_h.png) 3 10 3 10;\
border-top: 3px transparent;\
border-bottom: 3px transparent;\
border-right: 10px transparent;\
border-left: 10px transparent;\
}"\
"QPushButton:hover {\
color: grey;\
border-image: url(:/images/close_p.png) 3 10 3 10;\
border-top: 3px transparent;\
border-bottom: 3px transparent;\
border-right: 10px transparent;\
border-left: 10px transparent;\
}");
timer = new QTimer(this);
connect(timer, SIGNAL(timeout()), this, SLOT(slotUpdate()));
//timer->start(500);
}

mainWidget::~mainWidget()
{
}
// Init Speech Input
void mainWidget::initializeAudioInput()
{
speechInput_AudioFormat.setSampleRate(16000);
speechInput_AudioFormat.setChannelCount(1);
speechInput_AudioFormat.setSampleSize(16);
speechInput_AudioFormat.setSampleType(QAudioFormat::SignedInt);
speechInput_AudioFormat.setByteOrder(QAudioFormat::LittleEndian);
speechInput_AudioFormat.setCodec("audio/pcm");

speechInput_AudioDeviceInfo = QAudioDeviceInfo::defaultInputDevice();
bool isSupport = speechInput_AudioDeviceInfo.isFormatSupported(speechInput_AudioFormat);
qDebug()<<"isSupport "<<isSupport<<curPath;
if(!isSupport)
{
speechInput_AudioFormat = speechInput_AudioDeviceInfo.nearestFormat(speechInput_AudioFormat);
}

curName = QDateTime::currentDateTime().toString("yyyyMMddhhmmss");
curName.append(".wav");
speechInput_AudioInfo = new AudioInfo(speechInput_AudioFormat, this, curPath+"/lib/"+curName);
connect(speechInput_AudioInfo, SIGNAL(update()), SLOT(refreshRender()));
qDebug()<<"isSupport======= "<<speechInput_AudioInfo->errorString();
createAudioInput();
}

// Create Audio Input
void mainWidget::createAudioInput()
{
speechInput_AudioInput = new QAudioInput(speechInput_AudioDeviceInfo, speechInput_AudioFormat, this);
//connect(speechInput_AudioInput,SIGNAL(stateChanged(QAudio::State)),this,SLOT(slotStateChanged(QAudio::State)));
connect(speechInput_AudioInput,SIGNAL(notify()),this,SLOT(slotNotify()));
qDebug()<<"createAudioInput";
if(speechInput_AudioInput != NULL)
{
qDebug()<<"createAudioInput333";
speechInput_AudioInfo->start();
qDebug()<<"createAudioInput1111";
speechInput_AudioInput->start(speechInput_AudioInfo);
qDebug()<<"createAudioInput222";
}
}

// Stop Audio Input
void mainWidget::stopAudioInput()
{
speechInput_AudioInput->stop();
speechInput_AudioInfo->stop();
speechInput_AudioInput->deleteLater();
speechInput_AudioInput = NULL;
}

// Refresh Render
void mainWidget::refreshRender()
{
speechArea->setLevel(speechInput_AudioInfo->level());
speechArea->repaint();
}

void mainWidget::slotGoogleApiData(int, QString str, double)
{
if(str != ""){
label_Speech_Waiting->hide();
textEdit_Input->setText(str);
}else{
label_Speech_Waiting->hide();
}
label_Speech_Waiting->setEnabled(true);
frame_Speech->show();
speechArea->hide();
}
/*
 * QAudio::ActiveState    0  Audio data is being processed; set after start() while data is available.
 * QAudio::SuspendedState 1  The device is suspended; entered only after suspend() is called.
 * QAudio::StoppedState   2  The audio device is closed and not processing any audio data.
 * QAudio::IdleState      3  No audio data to process, e.g. the source QIODevice has nothing to deliver.
 */
void mainWidget::slotStateChanged(QAudio::State status)
{
//qDebug()<<"slotStateChanged "<<status<<speechInput_AudioInput->error();
switch (status) {
case QAudio::SuspendedState:
break;
case QAudio::ActiveState:
//QTimer::singleShot(5000,this,SLOT(pushButton_SpeechCancel_clicked()));
break;
case QAudio::StoppedState:
if (speechInput_AudioInput->error() != QAudio::NoError)
{
textEdit_Input->setText(QString::fromUtf8("音频设备未安装"));
} else
{
//QTimer::singleShot(5000,this,SLOT(pushButton_SpeechCancel_clicked()));
}
break;
default:
break;
}
speechInput_AudioInput->error();
}

void mainWidget::slotUpdate()
{
curI +=0.1;
qDebug()<<"slotUpdate "<<curI;
if(curI >1.0)
{
timer->stop();
return;
}
if(speechArea != NULL)
{
qreal i = qrand()%10;
qreal ii = i/10;
qDebug()<<"slotUpdate I "<<curI;
speechArea->setLevel(curI);
speechArea->repaint();
}
}

void mainWidget::slotNotify()
{
// qDebug()<<" slotNotify "<<speechInput_AudioInput->error();
}

void mainWidget::paintEvent(QPaintEvent *)
{
QPainter painter(this);
QPixmap pixmap = QPixmap(":/images/main_bg.png").scaled(this->size());
painter.drawPixmap(this->rect(), pixmap);
}

void mainWidget::mousePressEvent(QMouseEvent *e)
{
// Drag area: the top bar, excluding the close button (which sits at width()-40).
if( (e->pos().x()>= 10) && (e->pos().y()<=45) && (e->pos().x()<=this->width()-40))
{
canmove = true;
}
else
{
canmove = false;
e->accept();
return;
}
oldPos = e->pos();
e->accept();
}

void mainWidget::mouseMoveEvent(QMouseEvent *e)
{
if(canmove)
{
//qDebug()<<this->pos()<<"\n"<<this->y();
move(e->globalPos() - oldPos);
}
e->accept();
}

// Send Request
void mainWidget::flacEncoderFinished(int exitCode, QProcess::ExitStatus exitStatus)
{
qDebug()<<"flacEncoderFinished "<<exitStatus<<"\n"<<exitCode;
if (exitStatus == QProcess::NormalExit)
{
QByteArray flacData = flacEncoder->readAll();
protocol->Request_SPEECH(flacData);
}
//label_Speech_Waiting->hide();
//frame_Speech->hide();
flacEncoder->deleteLater();
flacEncoder = NULL;
}

// Speech Input Button - Start Speech
void mainWidget::pushButton_Speech_clicked()
{
initializeAudioInput();
qDebug()<<"pushButton_Speech_clicked";
textEdit_Input->setReadOnly(true);
//label_Speech_Waiting->show();
frame_Speech->hide();
speechArea->show();
cancel_btn->show();
cancel_btn->setEnabled(true);
//QTimer::singleShot(5000,this,SLOT(pushButton_SpeechCancel_clicked()));
}

void mainWidget::pushButton_Close_Clicked()
{
if(speechInput_AudioInput != NULL)
{
stopAudioInput();
}
qApp->quit();
}

// Speech Input Button - Stop Speech
// Call flacEncoder
void mainWidget::pushButton_SpeechCancel_clicked()
{
frame_Speech->hide();
cancel_btn->hide();
speechArea->hide();
label_Speech_Waiting->show();
label_Speech_Waiting->setEnabled(false);
stopAudioInput();
//delete speechInput_AudioInput;
//label_Speech_Waiting->setText(QString::fromUtf8("识别中..."));
//QString program = "./lib/flac.exe";
/*
 * Equivalent command line: flac.exe -c --totally-silent -f -8 test.wav
 * -c writes the FLAC stream to stdout, -f forces overwriting existing output,
 * -8 selects the highest compression level.
 */
QStringList arguments;
#ifdef Q_OS_WIN
QString program = "./lib/flac.exe";
#else // Q_OS_MAC
QString program = "/usr/local/bin/flac";
#endif
arguments << "-c" << "-f" << "-8" << curPath +"/lib/"+curName;
flacEncoder = new QProcess(this);
connect(flacEncoder,SIGNAL(finished(int,QProcess::ExitStatus)),this,SLOT(flacEncoderFinished(int, QProcess::ExitStatus)));
flacEncoder->start(program, arguments);
qDebug()<<"arguments "<<program<<" "<<arguments;
}

After the audio has been captured and encoded, it is posted to the Google Speech API, which returns the recognition result.


Example of the API call:

#include "protocol.h"

Protocol::Protocol(QObject *parent) :
QObject(parent),Nt_SPEECH(NULL)
{
}
void Protocol::Request_SPEECH(QByteArray & audioData)
{
qDebug()<<"audioData "<<audioData.length();
if (!Nt_SPEECH)
{
QNetworkRequest request;
QString speechAPI = "http://www.google.com/speech-api/v1/recognize?xjerr=1&client=chromium&lang=zh-CN&maxresults=1";
request.setUrl(speechAPI);
request.setRawHeader("User-Agent", "Mozilla/5.0");
request.setRawHeader("Content-Type", "audio/x-flac; rate=16000");
//qDebug(audioData);
Nt_SPEECH = NetworkMGR.post(request, audioData);
connect(Nt_SPEECH, SIGNAL(readyRead()), this, SLOT(Read_SPEECH()));
}
}

/*
 * Sample response:
 * {
 *   "status": 0,                                  // result code
 *   "id": "c421dee91abe31d9b8457f2a80ebca91-1",   // recognition id
 *   "hypotheses":                                 // candidate results
 *   [
 *     {
 *       "utterance": "下午好",                    // recognized text
 *       "confidence": 0.2507637                   // confidence / accuracy estimate
 *     },
 *     {
 *       "utterance": "午好",
 *       "confidence": 0.2507637
 *     }
 *   ]
 * }
 */
void Protocol::Read_SPEECH()
{
QString content = QString::fromUtf8( Nt_SPEECH->readAll() );
qDebug()<<"content: "<<content;
emit Signal_SPEECH(0, content, 0);
disconnect(Nt_SPEECH, SIGNAL(readyRead()), this, SLOT(Read_SPEECH()));
Nt_SPEECH->deleteLater(); // release the finished reply; deleting it directly from a slot is unsafe
Nt_SPEECH = NULL;
}
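The reply shown in the comment above is JSON, but Read_SPEECH() currently forwards the raw string. A minimal sketch of extracting the top hypothesis, assuming Qt 5's QJsonDocument is available (the function name is illustrative):

#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>
#include <QString>

// Sketch: pull the best "utterance" out of the Google Speech API reply.
QString bestUtterance(const QByteArray &reply)
{
    const QJsonObject root = QJsonDocument::fromJson(reply).object();
    const QJsonArray hypotheses = root.value("hypotheses").toArray();
    if (hypotheses.isEmpty())
        return QString(); // nothing recognized (or a non-zero status)
    return hypotheses.first().toObject().value("utterance").toString();
}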

Here are some screenshots.

[Screenshots: the initial interface, followed by the recognition results.]

In the last one nothing was recognized; when the recording contains a lot of background noise the recognition results are quite unsatisfactory.

PS: This is largely based on an existing Google Speech API example (thanks to its author); I ported it to both the Windows 7 and Mac OS X platforms.
