Preface

Qt projects on Linux often depend on some shared library, and that library is usually not bundled with the project but installed under a system path such as /usr/. If the library is not installed, the project fails both to build and to run.
The fix is to load the library at runtime with QLibrary, which decouples the project from it completely: the parts of the project that involve the library no longer #include its headers, and the related logic first checks whether the dynamic load succeeded.
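The core pattern is small: point a QLibrary at the .so, load() it, then resolve() each exported symbol into a function pointer, checking every step before use. A minimal sketch, where the library path and the example_add symbol are hypothetical placeholders:

#include <QLibrary>
#include <QDebug>

typedef int (*AddFunc)(int, int); // expected signature of the exported symbol

int main()
{
    QLibrary lib("/usr/lib/x86_64-linux-gnu/libexample.so"); // hypothetical path
    if (!lib.load())
    {
        // Bail out here; a real application would fall back gracefully
        qWarning() << "load failed:" << lib.errorString();
        return 1;
    }

    // resolve() returns nullptr when the symbol is missing, so always check
    AddFunc add = reinterpret_cast<AddFunc>(lib.resolve("example_add"));
    if (!add)
    {
        qWarning() << "symbol not found:" << lib.errorString();
        return 1;
    }

    qDebug() << add(1, 2); // call through the resolved pointer
    return 0;
}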

Example

The example below dynamically loads a speech input library.
Header file:

#pragma once

#include <QObject>
#include <QLibrary>
#include <functional>
// speech: these are the library's own headers; they can no longer be included directly
//#include <kylin-ai/coreai/speech/recognizer.h>
//#include <kylin-ai/coreai/speech/synthesizer.h>
//#include <kylin-ai/coreai/speech/result.h>
//#include <kylin-ai/coreai/speech/audioconfig.h>

// These definitions originally lived in the library headers. They have to be
// copied here, because some of the library's functions return these types and
// the calls would misbehave at runtime without matching definitions.
typedef enum
{
    SPEECH_ERROR_OCCURRED = 1,
    SPEECH_RECOGNITION_STARTED = 2,
    SPEECH_RECOGNIZING = 3,
    SPEECH_RECOGNIZED = 4,
    SPEECH_RECOGNITION_COMPLETED = 5,
    SPEECH_SYNTHESIS_STARTED = 6,
    SPEECH_SYNTHESIZING = 7,
    SPEECH_SYNTHESIS_COMPLETED = 8,
} SpeechResultReason;

// Opaque handles: only pointers to these are ever passed around
typedef struct _SpeechRecognitionSession SpeechRecognitionSession;
typedef struct _SpeechRecognitionResult SpeechRecognitionResult;
typedef struct _SpeechSynthesisResult SpeechSynthesisResult;
typedef struct _AudioConfig AudioConfig;
typedef struct _GMainLoop GMainLoop; // matches glib's own typedef

// Function-pointer types matching the signatures of the symbols we resolve
typedef void (*SpeechRecognitionResultCallback)(SpeechRecognitionResult *result, void *user_data);
typedef void (*SpeechSynthesisResultCallback)(SpeechSynthesisResult *result, void *user_data);
typedef void* (*CreateSessionFunc)();
typedef int (*InitSessionFunc)(void*);
typedef const char* (*GetTextFunc)(SpeechRecognitionResult *result);
typedef SpeechResultReason (*GetReasonFunc)(SpeechRecognitionResult *result);
typedef void (*SetCallbackFunc)(void*, void (*callback)(SpeechRecognitionResult*, void*), void*);
typedef void (*StartRecognitionFunc)(void*);
typedef void (*StopRecognitionFunc)(void*);
typedef void (*SetAudioConfigFunc)(void*, void*);
typedef AudioConfig* (*CreateAudioConfigFunc)();

class QMutex;
class PUBLICDATA_EXPORT Speech : public QObject // PUBLICDATA_EXPORT: export macro defined elsewhere in the project
{
    Q_OBJECT
public:
    static Speech* getInstance();
    void beginListening();
    void endListening();
    bool isSpeechSetup();
    bool isSpeechListening();

protected:
    explicit Speech(QObject *parent = nullptr);

signals:
    void sigSpeechCallBack(const char*);
    void warning(QString);

private:
    void loadSpeechLibrary();
    static void callback(SpeechRecognitionResult *result, void *user_data);

private:
    static Speech* m_pSpeech;
    static QMutex mutex_m;

private:
    QLibrary m_speechLibrary;
    // Resolved symbols; all stay nullptr until loadSpeechLibrary() succeeds
    CreateSessionFunc speech_recognizer_create_session = nullptr;
    InitSessionFunc speech_recognizer_init_session = nullptr;
    SetCallbackFunc speech_recognizer_result_set_callback = nullptr;
    StartRecognitionFunc speech_recognizer_start_continuous_recognition_async = nullptr;
    StopRecognitionFunc speech_recognizer_stop_continuous_recognition_async = nullptr;
    GetTextFunc speech_recognition_result_get_text = nullptr;
    GetReasonFunc speech_recognition_result_get_reason = nullptr;
    CreateAudioConfigFunc audio_config_create_continuous_audio_input_from_default_microphone = nullptr;
    SetAudioConfigFunc speech_recognizer_set_audio_config = nullptr;

    void *m_session = nullptr;
    GMainLoop *m_pMainLoop = nullptr; // loop that pumps recognition callbacks

    bool m_bSpeechSetup;
    bool m_isListening;
};
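One thing worth noting before the implementation: resolve() looks a symbol up by its exact exported name. That works here because the library exposes a C ABI with unmangled names, which the opaque structs and plain C function names suggest. A library implemented in C++ would have to declare its exports extern "C" for the same lookup to succeed, roughly like this (hypothetical library-side code):

// Library side (hypothetical): extern "C" suppresses C++ name mangling, so
// QLibrary::resolve("speech_recognizer_create_session") finds the symbol.
extern "C" void* speech_recognizer_create_session();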

Implementation:

#include "speech.h"
#include <QMutex>
#include <QDebug>
#include <QMessageBox>
#include <QDBusInterface>
#include <QDBusReply>
#include <glib.h>

Speech* Speech::m_pSpeech = nullptr;
QMutex Speech::mutex_m;

Speech::Speech(QObject *parent) : QObject(parent)
{
m_bSpeechSetup = false;
m_isListening = false;
loadSpeechLibrary();
}

Speech *Speech::getInstance()
{
    // Double-checked locking: only take the mutex on first use
    if (m_pSpeech == nullptr)
    {
        mutex_m.lock();

        if (m_pSpeech == nullptr)
        {
            m_pSpeech = new Speech();
        }

        mutex_m.unlock();
    }

    return m_pSpeech;
}

bool Speech::isSpeechSetup()
{
    return m_bSpeechSetup;
}

bool Speech::isSpeechListening()
{
    return m_isListening;
}

void Speech::loadSpeechLibrary()
{
    m_speechLibrary.setFileName("/usr/lib/x86_64-linux-gnu/libkysdk-coreai-speech.so");
    if (!m_speechLibrary.load())
    {
        qWarning() << "Failed to load speech library:" << m_speechLibrary.errorString();
        return;
    }

    // Resolve the function symbols
    speech_recognizer_create_session = reinterpret_cast<CreateSessionFunc>(m_speechLibrary.resolve("speech_recognizer_create_session"));
    speech_recognizer_init_session = reinterpret_cast<InitSessionFunc>(m_speechLibrary.resolve("speech_recognizer_init_session"));
    speech_recognizer_result_set_callback = reinterpret_cast<SetCallbackFunc>(m_speechLibrary.resolve("speech_recognizer_result_set_callback"));
    speech_recognizer_start_continuous_recognition_async = reinterpret_cast<StartRecognitionFunc>(m_speechLibrary.resolve("speech_recognizer_start_continuous_recognition_async"));
    speech_recognizer_stop_continuous_recognition_async = reinterpret_cast<StopRecognitionFunc>(m_speechLibrary.resolve("speech_recognizer_stop_continuous_recognition_async"));
    speech_recognizer_set_audio_config = reinterpret_cast<SetAudioConfigFunc>(m_speechLibrary.resolve("speech_recognizer_set_audio_config"));
    speech_recognition_result_get_text = reinterpret_cast<GetTextFunc>(m_speechLibrary.resolve("speech_recognition_result_get_text"));
    speech_recognition_result_get_reason = reinterpret_cast<GetReasonFunc>(m_speechLibrary.resolve("speech_recognition_result_get_reason"));
    audio_config_create_continuous_audio_input_from_default_microphone = reinterpret_cast<CreateAudioConfigFunc>(m_speechLibrary.resolve("audio_config_create_continuous_audio_input_from_default_microphone"));

    // Every symbol must resolve, including the result accessors used in the
    // callback and the audio-config factory used in beginListening()
    if (!speech_recognizer_create_session || !speech_recognizer_init_session || !speech_recognition_result_get_text ||
        !speech_recognition_result_get_reason || !speech_recognizer_result_set_callback ||
        !speech_recognizer_start_continuous_recognition_async || !speech_recognizer_stop_continuous_recognition_async ||
        !speech_recognizer_set_audio_config || !audio_config_create_continuous_audio_input_from_default_microphone)
    {
        qWarning() << "Failed to resolve one or more speech library symbols";
        m_speechLibrary.unload();
        return;
    }

    m_bSpeechSetup = true; // only now is the library considered usable
}

void Speech::beginListening()
{
    // m_bSpeechSetup is only true when every symbol resolved in loadSpeechLibrary()
    if (!m_bSpeechSetup)
    {
        qWarning() << "Speech library functions not loaded";
        emit warning(tr("Voice-to-text is currently unavailable."));
        return;
    }

    m_isListening = true;
    m_session = speech_recognizer_create_session();
    int result = speech_recognizer_init_session(m_session);
    qDebug() << "speech_recognizer_init_session returned" << result;
    if (result == 1) // per the original code, 1 means a network failure
    {
        m_isListening = false;
        emit warning(tr("The network is abnormal. Check whether the system is connected to the network"));
        return;
    }
    if (result != 0) // any other non-zero value: recognition is unavailable
    {
        m_isListening = false;
        emit warning(tr("Voice-to-text is currently unavailable."));
        return;
    }

    speech_recognizer_result_set_callback(m_session, callback, this);
    AudioConfig *config = audio_config_create_continuous_audio_input_from_default_microphone();
    speech_recognizer_set_audio_config(m_session, config);

    speech_recognizer_start_continuous_recognition_async(m_session);
    // The library delivers its callbacks through a GLib main loop. g_main_loop_run()
    // blocks the calling thread until endListening() quits the loop, so call
    // beginListening() from a worker thread rather than the GUI thread.
    m_pMainLoop = g_main_loop_new(nullptr, FALSE);
    g_main_loop_run(m_pMainLoop);
    GMainLoop *finished = m_pMainLoop;
    m_pMainLoop = nullptr;
    g_main_loop_unref(finished);
}

void Speech::endListening()
{
    if (speech_recognizer_stop_continuous_recognition_async)
    {
        speech_recognizer_stop_continuous_recognition_async(m_session);
        m_isListening = false;
    }
    // Quit the GLib loop started in beginListening() so that the thread blocked
    // in g_main_loop_run() can return
    if (m_pMainLoop)
    {
        g_main_loop_quit(m_pMainLoop);
    }
}

void Speech::callback(SpeechRecognitionResult *result, void *user_data)
{
    Speech *self = static_cast<Speech*>(user_data);
    qDebug() << "speech result:" << self->speech_recognition_result_get_text(result);

    // SPEECH_RECOGNIZED is the corrected, final result of the current utterance
    if (self->speech_recognition_result_get_reason(result) == SPEECH_RECOGNIZED)
    {
        emit self->sigSpeechCallBack(self->speech_recognition_result_get_text(result));
    }
}
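
For completeness, here is one way a caller might wire the class up. This is an illustrative sketch, not part of the original code: the setupSpeech helper is hypothetical, and it uses QThread::create (Qt 5.10+) because beginListening() blocks inside g_main_loop_run().

#include "speech.h"
#include <QThread>
#include <QDebug>

void setupSpeech()
{
    Speech *speech = Speech::getInstance();

    // Without a receiver context the connection is direct, so the lambdas run
    // in whichever thread emits the signal
    QObject::connect(speech, &Speech::sigSpeechCallBack, [](const char *text) {
        qDebug() << "recognized:" << QString::fromUtf8(text);
    });
    QObject::connect(speech, &Speech::warning, [](const QString &msg) {
        qWarning() << msg;
    });

    if (speech->isSpeechSetup())
    {
        // beginListening() only returns after endListening() quits the GLib loop
        QThread *worker = QThread::create([speech] { speech->beginListening(); });
        worker->start();
    }
}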