- 我下载了 binding sample。你可能会想跳过这一步,但如果你想让它工作,你真的必须从这个项目开始。
-
我使用 Xcode 创建了一个具有双重目的的 Objective-C 库(我称之为 SpeechKitLibrary)—— 一个目的是定义 SpeechKitApplicationKey(这是 SpeechKit 需要的外部依赖项):
const unsigned char SpeechKitApplicationKey[] = {...};
另一个是定义一个利用 SpeechKit 框架的类,并与之链接。 (在 Xcode 中,在项目的 frameworks 部分添加 SpeechKit 框架)。
我写的 .m 文件看起来像这样......(你可以找出 .h 文件 - 超级简单)。我不是 100% 确定您需要所有这些,但我想确保此步骤产生的静态存档库将导入正确的符号。您也许可以通过某种方式避免这种情况,但在我的实验中,我发现我需要做这样的事情......
// The SpeechKitWrapper isn't actually used at runtime - rather, it is a way to exercise all the
// APIs that the binding library needs from the SpeechKit framework, so that those symbols get
// linked into the generated .a file.
@implementation SpeechKitWrapper

@synthesize status;

/// Initializes the wrapper and touches the SpeechKit setup, earcon, and
/// recognizer APIs so their symbols are pulled into the static archive.
/// @param delegate Receives the SKRecognizerDelegate callbacks from the recognizer.
/// @return The initialized wrapper, or nil on failure.
- (id)initWithDelegate:(id <SKRecognizerDelegate>)delegate
{
    self = [super init];
    if (self) {
        del = delegate;
        [self setStatus:@"initializing"];

        // FIX: the original code was missing the opening '[' on this message
        // send ("SpeechKit setupWithID:..."), which is a compile error.
        [SpeechKit setupWithID:@"NMDPTRIAL_ogazitt20120220010133"
                          host:@"sandbox.nmdp.nuancemobility.net"
                          port:443
                        useSSL:NO
                      delegate:nil];

        NSString *text = [NSString stringWithFormat:@"initialized. sessionid = %@", [SpeechKit sessionID]];
        [self setStatus:text];

        // Register the start-of-recording audio cue.
        SKEarcon *earconStart = [SKEarcon earconWithName:@"beep.wav"];
        [SpeechKit setEarcon:earconStart forType:SKStartRecordingEarconType];

        // Kick off a dictation recognizer; callbacks go to the caller's delegate.
        voiceSearch = [[SKRecognizer alloc] initWithType:SKDictationRecognizerType
                                               detection:SKLongEndOfSpeechDetection
                                                language:@"en_US"
                                                delegate:delegate];

        text = [NSString stringWithFormat:@"recognizer connecting. sessionid = %@", [SpeechKit sessionID]];
        [self setStatus:text];
    }
    return self;
}

@end
然后,我为三种不同的架构 —— i386、arm6 和 arm7 —— 编译/链接了这个静态存档。BindingSample 中的 Makefile 是执行此操作的模板。最终结果是你会得到三个库 —— libSpeechKitLibrary-{i386,arm6,arm7}.a。然后,Makefile 使用 OSX 的 lipo(1) 工具创建一个通用库 (libSpeechKitLibraryUniversal.a)。
-
现在您才准备好创建绑定库。您可以在绑定示例中重用 AssemblyInfo.cs(这将展示如何在通用库上为所有架构创建导入 - 并将驱动一些编译标志)...
[assembly: LinkWith ("libSpeechKitLibraryUniversal.a", LinkTarget.Simulator | LinkTarget.ArmV6 | LinkTarget.ArmV7, ForceLoad = true)]
-
您根据 Makefile 使用 btouch 编译 ApiDefinition.cs 文件(我想我需要重复 StructsAndEnums.cs 中的一些信息才能使其工作)。注意 —— 我唯一没有用上的功能是“SetEarcon”相关的部分 —— 因为这是一个静态存档库而不是框架,我无法将 wav 文件捆绑为资源文件……我无法弄清楚如何让 SetEarcon 方法接受来自我的应用程序包中的资源。
using System;
using MonoTouch.Foundation;
// btouch API definition for the Nuance SpeechKit iOS framework. btouch compiles
// these attributed interfaces into the managed binding classes; each [Export]
// string must match the corresponding Objective-C selector exactly.
namespace Nuance.SpeechKit
{
// SKEarcon.h
// Audio-cue slots an SKEarcon can be registered for via SpeechKit.SetEarcon.
public enum SKEarconType
{
SKStartRecordingEarconType = 1,
SKStopRecordingEarconType = 2,
SKCancelRecordingEarconType = 3,
};
// SKRecognizer.h
// End-of-speech detection modes passed to SKRecognizer's constructor.
public enum SKEndOfSpeechDetection
{
SKNoEndOfSpeechDetection = 1,
SKShortEndOfSpeechDetection = 2,
SKLongEndOfSpeechDetection = 3,
};
// Recognizer-type strings accepted by SKRecognizer's initWithType: constructor
// (mirrors the SKRecognizerType constants; see also the commented-out [Field]
// bindings inside SKRecognizer below).
public static class SKRecognizerType
{
public static string SKDictationRecognizerType = "dictation";
public static string SKWebSearchRecognizerType = "websearch";
};
// SpeechKitErrors.h
// Error codes surfaced through SKRecognizerDelegate.OnError's NSError.
public enum SpeechKitErrors
{
SKServerConnectionError = 1,
SKServerRetryError = 2,
SKRecognizerError = 3,
SKVocalizerError = 4,
SKCancelledError = 5,
};
// SKEarcon.h
// An audio cue (earcon) played around recording events.
[BaseType(typeof(NSObject))]
interface SKEarcon
{
// Binds initWithContentsOfFile: — path to an audio file.
[Export("initWithContentsOfFile:")]
IntPtr Constructor(string path);
// Binds the +earconWithName: factory method.
[Static, Export("earconWithName:")]
SKEarcon FromName(string name);
}
// SKRecognition.h
// Recognition result object delivered to SKRecognizerDelegate.OnResults.
[BaseType(typeof(NSObject))]
interface SKRecognition
{
// Candidate transcription strings.
[Export("results")]
string[] Results { get; }
// Scores for the candidates — presumably parallel to Results; verify against the header.
[Export("scores")]
NSNumber[] Scores { get; }
[Export("suggestion")]
string Suggestion { get; }
// Convenience accessor for a single transcription.
[Export("firstResult")]
string FirstResult();
}
// SKRecognizer.h
// A speech-recognition session; constructed with a type/detection/language/delegate.
[BaseType(typeof(NSObject))]
interface SKRecognizer
{
[Export("audioLevel")]
float AudioLevel { get; }
// type: one of the SKRecognizerType strings above.
[Export ("initWithType:detection:language:delegate:")]
IntPtr Constructor (string type, SKEndOfSpeechDetection detection, string language, SKRecognizerDelegate del);
[Export("stopRecording")]
void StopRecording();
[Export("cancel")]
void Cancel();
/*
[Field ("SKSearchRecognizerType", "__Internal")]
NSString SKSearchRecognizerType { get; }
[Field ("SKDictationRecognizerType", "__Internal")]
NSString SKDictationRecognizerType { get; }
*/
}
// [Model] marks this as an Objective-C delegate protocol: subclass it and
// override the exported methods to receive recognizer callbacks.
[BaseType(typeof(NSObject))]
[Model]
interface SKRecognizerDelegate
{
[Export("recognizerDidBeginRecording:")]
void OnRecordingBegin (SKRecognizer recognizer);
[Export("recognizerDidFinishRecording:")]
void OnRecordingDone (SKRecognizer recognizer);
// [Abstract]: subclasses must implement this callback.
[Export("recognizer:didFinishWithResults:")]
[Abstract]
void OnResults (SKRecognizer recognizer, SKRecognition results);
[Export("recognizer:didFinishWithError:suggestion:")]
[Abstract]
void OnError (SKRecognizer recognizer, NSError error, string suggestion);
}
// speechkit.h
// Static entry points of the framework; Initialize wraps setupWithID:host:port:useSSL:delegate:.
[BaseType(typeof(NSObject))]
interface SpeechKit
{
[Static, Export("setupWithID:host:port:useSSL:delegate:")]
void Initialize(string id, string host, int port, bool useSSL, [NullAllowed] SpeechKitDelegate del);
[Static, Export("destroy")]
void Destroy();
[Static, Export("sessionID")]
string GetSessionID();
[Static, Export("setEarcon:forType:")]
void SetEarcon(SKEarcon earcon, SKEarconType type);
}
[BaseType(typeof(NSObject))]
[Model]
interface SpeechKitDelegate
{
// Called after the SpeechKit session has been torn down via Destroy.
[Export("destroyed")]
void Destroyed();
}
// Binding for the helper class defined in the Objective-C SpeechKitLibrary.
[BaseType(typeof(NSObject))]
interface SpeechKitWrapper
{
[Export("initWithDelegate:")]
IntPtr Constructor(SKRecognizerDelegate del);
[Export("status")]
string Status { get; set; }
}
}
-
您现在有了一个可以被 MonoTouch 应用程序项目引用的程序集。现在重要的是要记住链接所有依赖的框架(不仅是 SpeechKit,还有 SpeechKit 自身的依赖项)—— 你可以通过向 mtouch 传递一些额外的参数来做到这一点:
-gcc_flags "-F<insert_framework_path_here> -framework SpeechKit -framework SystemConfiguration -framework Security -framework AVFoundation -framework AudioToolbox"
就是这样,伙计们!希望这对您有所帮助...