As usual, the code first:
#pragma once
#ifdef WIN32
#include <winsock2.h> // include before <windows.h> so the Winsock 2 API requested by WSAStartup() is available
#include <windows.h>
#else
#include <sys/socket.h>
#include <unistd.h> // close()
#include <fcntl.h>
#include <errno.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#define SOCKET int
#define SOCKET_ERROR -1
#define INVALID_SOCKET -1
#endif
#include <string.h> // memset/memcpy used by the implementation
#ifndef CHECKF
// CHECKF logs and bails out when a sanity check fails; log_msg is assumed to be a logging helper provided elsewhere in the project
#define CHECKF(x) \
do \
{ \
if (!(x)) { \
log_msg("CHECKF", #x, __FILE__, __LINE__); \
return 0; \
} \
} while (0)
#endif
#define _MAX_MSGSIZE 16 * 1024 // tentative maximum size of a single message: 16 KB
#define BLOCKSECONDS 30 // how long Create() may block, in seconds
#define INBUFSIZE (64*1024) //? receive buffer; tune the exact size based on profiling
#define OUTBUFSIZE (8*1024) //? send buffer; tune based on profiling. As long as it stays under 8 KB, Flush() only needs a single send()
class CGameSocket {
public:
CGameSocket(void);
bool Create(const char* pszServerIP, int nServerPort, int nBlockSec = BLOCKSECONDS, bool bKeepAlive = false);
bool SendMsg(void* pBuf, int nSize);
bool ReceiveMsg(void* pBuf, int& nSize);
bool Flush(void);
bool Check(void);
void Destroy(void);
SOCKET GetSocket(void) const { return m_sockClient; }
private:
bool recvFromSock(void); // read as much data from the network as possible
bool hasError(); // whether a real error occurred; note that "operation in progress" in non-blocking mode is not an error
void closeSocket();
SOCKET m_sockClient;
// send buffer
char m_bufOutput[OUTBUFSIZE]; //? could be optimized into an array of pointers
int m_nOutbufLen;
// ring (circular) receive buffer
char m_bufInput[INBUFSIZE];
int m_nInbufLen;
int m_nInbufStart; // m_bufInput is used as a circular queue; this is the queue head, in the range 0 .. (INBUFSIZE-1)
};
#include "stdafx.h"
#include "Socket.h"
CGameSocket::CGameSocket()
{
// initialize all members so the object is safe to use even before Create()
m_sockClient = INVALID_SOCKET;
m_nOutbufLen = 0;
m_nInbufLen = 0;
m_nInbufStart = 0;
memset(m_bufOutput, 0, sizeof(m_bufOutput));
memset(m_bufInput, 0, sizeof(m_bufInput));
}
void CGameSocket::closeSocket()
{
#ifdef WIN32
closesocket(m_sockClient);
WSACleanup();
#else
close(m_sockClient);
#endif
}
bool CGameSocket::Create(const char* pszServerIP, int nServerPort, int nBlockSec, bool bKeepAlive /*= FALSE*/)
{
// validate parameters
if(pszServerIP == 0 || strlen(pszServerIP) > 15) {
return false;
}
#ifdef WIN32
WSADATA wsaData;
WORD version = MAKEWORD(2, 0);
int ret = WSAStartup(version, &wsaData);//win sock start up
if (ret != 0) {
return false;
}
#endif
// create the client socket
m_sockClient = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if(m_sockClient == INVALID_SOCKET) {
closeSocket();
return false;
}
// enable SO_KEEPALIVE on the socket
if(bKeepAlive)
{
int optval=1;
if(setsockopt(m_sockClient, SOL_SOCKET, SO_KEEPALIVE, (char *) &optval, sizeof(optval)))
{
closeSocket();
return false;
}
}
#ifdef WIN32
// switch the socket to non-blocking mode
DWORD nMode = 1;
int nRes = ioctlsocket(m_sockClient, FIONBIO, &nMode);
if (nRes == SOCKET_ERROR) {
closeSocket();
return false;
}
#else
// switch the socket to non-blocking mode
fcntl(m_sockClient, F_SETFL, O_NONBLOCK);
#endif
unsigned long serveraddr = inet_addr(pszServerIP);
if(serveraddr == INADDR_NONE) // reject malformed IP addresses
{
closeSocket();
return false;
}
sockaddr_in addr_in;
memset((void *)&addr_in, 0, sizeof(addr_in));
addr_in.sin_family = AF_INET;
addr_in.sin_port = htons(nServerPort);
addr_in.sin_addr.s_addr = serveraddr;
if(connect(m_sockClient, (sockaddr *)&addr_in, sizeof(addr_in)) == SOCKET_ERROR) {
if (hasError()) {
closeSocket();
return false;
}
else // connect() is still in progress (WSAEWOULDBLOCK / EINPROGRESS)
{
timeval timeout;
timeout.tv_sec = nBlockSec;
timeout.tv_usec = 0;
fd_set writeset, exceptset;
FD_ZERO(&writeset);
FD_ZERO(&exceptset);
FD_SET(m_sockClient, &writeset);
FD_SET(m_sockClient, &exceptset);
int ret = select((int)m_sockClient + 1, NULL, &writeset, &exceptset, &timeout); // the nfds argument is ignored on Windows
if (ret == 0 || ret < 0) {
closeSocket();
return false;
} else // ret > 0
{
ret = FD_ISSET(m_sockClient, &exceptset);
if(ret) // equivalently: if (!FD_ISSET(m_sockClient, &writeset))
{
closeSocket();
return false;
}
}
}
}
m_nInbufLen = 0;
m_nInbufStart = 0;
m_nOutbufLen = 0;
struct linger so_linger;
so_linger.l_onoff = 1;
so_linger.l_linger = 500; // note: l_linger is in seconds
setsockopt(m_sockClient, SOL_SOCKET, SO_LINGER, (const char*)&so_linger, sizeof(so_linger));
return true;
}
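// Note (sketch, not part of the original class): select() reporting writability alone does not
// prove on every platform that the non-blocking connect() actually succeeded; a more defensive
// variant also reads SO_ERROR after select() returns, e.g.:
//
//   int soerr = 0;
//   socklen_t len = sizeof(soerr);
//   if (getsockopt(m_sockClient, SOL_SOCKET, SO_ERROR, (char*)&soerr, &len) != 0 || soerr != 0) {
//       closeSocket();   // the connect() actually failed; soerr holds the errno-style code
//       return false;
//   }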
bool CGameSocket::SendMsg(void* pBuf, int nSize)
{
if(pBuf == 0 || nSize <= 0) {
return false;
}
if (m_sockClient == INVALID_SOCKET) {
return false;
}
// length of the outgoing packet
int packsize = 0;
packsize = nSize;
// guard against output buffer overflow
if(m_nOutbufLen + nSize > OUTBUFSIZE) {
// flush the pending data immediately to make room
Flush();
if(m_nOutbufLen + nSize > OUTBUFSIZE) {
// still not enough room: treat it as a fatal error
Destroy();
return false;
}
}
// append the data to the end of the output buffer
memcpy(m_bufOutput + m_nOutbufLen, pBuf, nSize);
m_nOutbufLen += nSize;
return true;
}
bool CGameSocket::ReceiveMsg(void* pBuf, int& nSize)
{
// validate parameters
if(pBuf == NULL || nSize <= 0) {
return false;
}
if (m_sockClient == INVALID_SOCKET) {
return false;
}
// we need at least 2 bytes buffered before the message length can be read
if(m_nInbufLen < 2) {
// pull more data from the socket; bail out if that fails or there are still fewer than 2 bytes
if(!recvFromSock() || m_nInbufLen < 2) { // recvFromSock() updates m_nInbufLen
return false;
}
}
// the first two bytes of a message hold its total size (little-endian: low byte + high byte * 256);
// because the input buffer is circular, those two bytes may not be adjacent in memory
int packsize = (unsigned char)m_bufInput[m_nInbufStart] +
(unsigned char)m_bufInput[(m_nInbufStart + 1) % INBUFSIZE] * 256;
// reject packets with a bogus size (tentative maximum: 16 KB)
if (packsize <= 0 || packsize > _MAX_MSGSIZE) {
m_nInbufLen = 0; // simply reset the input buffer
m_nInbufStart = 0;
return false;
}
// check whether the whole message has arrived; if not, try to receive the rest
if (packsize > m_nInbufLen) {
// keep returning false until a complete packet is available
if (!recvFromSock() || packsize > m_nInbufLen) { // m_nInbufLen updated by recvFromSock()
return false;
}
}
// copy one complete message out of the ring buffer
if(m_nInbufStart + packsize > INBUFSIZE) {
// the message wraps around (split between the tail and the head of the ring buffer)
// first copy the chunk at the end of the buffer
int copylen = INBUFSIZE - m_nInbufStart;
memcpy(pBuf, m_bufInput + m_nInbufStart, copylen);
// then copy the remainder from the beginning of the buffer
memcpy((unsigned char *)pBuf + copylen, m_bufInput, packsize - copylen);
nSize = packsize;
} else {
// no wrap-around: a single memcpy is enough
memcpy(pBuf, m_bufInput + m_nInbufStart, packsize);
nSize = packsize;
}
// advance the ring buffer head past the consumed message
m_nInbufStart = (m_nInbufStart + packsize) % INBUFSIZE;
m_nInbufLen -= packsize;
return true;
}
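// Wire format assumed by ReceiveMsg() (sketch for clarity, not part of the original code):
// every message starts with a 16-bit little-endian length that counts the whole message,
// header included; in the usage code further below that length is the usSize field of MsgHead.
// A sender built to match it could look like this (SendPacket is a hypothetical helper):
//
//   bool SendPacket(CGameSocket* sock, const void* payload, unsigned short payloadLen)
//   {
//       unsigned short total = (unsigned short)(payloadLen + 2); // 2-byte length prefix + payload
//       if (total > _MAX_MSGSIZE) return false;
//       char buf[_MAX_MSGSIZE];
//       buf[0] = (char)(total & 0xFF);          // low byte first
//       buf[1] = (char)((total >> 8) & 0xFF);   // then high byte
//       memcpy(buf + 2, payload, payloadLen);
//       return sock->SendMsg(buf, total);
//   }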
bool CGameSocket::hasError()
{
#ifdef WIN32
int err = WSAGetLastError();
if(err != WSAEWOULDBLOCK) {
#else
int err = errno;
if(err != EINPROGRESS && err != EAGAIN) {
#endif
return true;
}
return false;
}
// read as much data as possible from the network; this is the only place that actually calls recv()
bool CGameSocket::recvFromSock(void)
{
if (m_nInbufLen >= INBUFSIZE || m_sockClient == INVALID_SOCKET) {
return false;
}
// receive the first chunk
int savelen, savepos; // length and position for storing the received data
if(m_nInbufStart + m_nInbufLen < INBUFSIZE) { // the free space wraps around the end of the buffer
savelen = INBUFSIZE - (m_nInbufStart + m_nInbufLen); // free space at the tail = the most one recv() can take
} else {
savelen = INBUFSIZE - m_nInbufLen;
}
// write position: just past the end of the buffered data
savepos = (m_nInbufStart + m_nInbufLen) % INBUFSIZE;
CHECKF(savepos + savelen <= INBUFSIZE);
int inlen = recv(m_sockClient, m_bufInput + savepos, savelen, 0);
if(inlen > 0) {
// some data arrived
m_nInbufLen += inlen;
if (m_nInbufLen > INBUFSIZE) {
return false;
}
// the first recv() filled the tail of the ring; try to receive a second chunk into the head
if(inlen == savelen && m_nInbufLen < INBUFSIZE) {
int savelen = INBUFSIZE - m_nInbufLen;
int savepos = (m_nInbufStart + m_nInbufLen) % INBUFSIZE;
CHECKF(savepos + savelen <= INBUFSIZE);
inlen = recv(m_sockClient, m_bufInput + savepos, savelen, 0);
if(inlen > 0) {
m_nInbufLen += inlen;
if (m_nInbufLen > INBUFSIZE) {
return false;
}
} else if(inlen == 0) {
Destroy();
return false;
} else {
// either the connection dropped / a real error occurred, or the call would merely block
if (hasError()) {
Destroy();
return false;
}
}
}
} else if(inlen == 0) {
Destroy();
return false;
} else {
// either the connection dropped / a real error occurred, or the call would merely block
if (hasError()) {
Destroy();
return false;
}
}
return true;
}
bool CGameSocket::Flush(void) //? if the output buffer exceeds the kernel send buffer, several send() calls are needed
{
if (m_sockClient == INVALID_SOCKET) {
return false;
}
if(m_nOutbufLen <= 0) {
return true;
}
// send one chunk
int outsize;
outsize = send(m_sockClient, m_bufOutput, m_nOutbufLen, 0);
if(outsize > 0) {
// drop the part that was actually sent (the regions overlap, so use memmove rather than memcpy)
if(m_nOutbufLen - outsize > 0) {
memmove(m_bufOutput, m_bufOutput + outsize, m_nOutbufLen - outsize);
}
m_nOutbufLen -= outsize;
if (m_nOutbufLen < 0) {
return false;
}
} else {
if (hasError()) {
Destroy();
return false;
}
}
return true;
}
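// Sketch of a Flush() variant that addresses the "//?" note above by looping until the buffer
// is drained or the socket would block (not the original implementation):
//
//   bool CGameSocket::FlushAll(void)
//   {
//       while (m_nOutbufLen > 0) {
//           int n = send(m_sockClient, m_bufOutput, m_nOutbufLen, 0);
//           if (n > 0) {
//               memmove(m_bufOutput, m_bufOutput + n, m_nOutbufLen - n); // drop what was sent
//               m_nOutbufLen -= n;
//           } else if (hasError()) {   // real error: tear the connection down
//               Destroy();
//               return false;
//           } else {                   // would block: try again on the next frame
//               break;
//           }
//       }
//       return true;
//   }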
bool CGameSocket::Check(void)
{
// probe the connection state
if (m_sockClient == INVALID_SOCKET) {
return false;
}
char buf[1];
int ret = recv(m_sockClient, buf, 1, MSG_PEEK);
if(ret == 0) {
Destroy();
return false;
} else if(ret < 0) {
if (hasError()) {
Destroy();
return false;
} else { // would block: the connection is still alive
return true;
}
} else { // data is available
return true;
}
return true;
}
void CGameSocket::Destroy(void)
{
// shut down the connection and reset all state
struct linger so_linger;
so_linger.l_onoff = 1;
so_linger.l_linger = 500; // seconds
setsockopt(m_sockClient, SOL_SOCKET, SO_LINGER, (const char*)&so_linger, sizeof(so_linger));
closeSocket();
m_sockClient = INVALID_SOCKET;
m_nInbufLen = 0;
m_nInbufStart = 0;
m_nOutbufLen = 0;
memset(m_bufOutput, 0, sizeof(m_bufOutput));
memset(m_bufInput, 0, sizeof(m_bufInput));
}
// sending a message
bool bSucSend = m_pSocket->SendMsg(buf, nLen); // buf and nLen are prepared by the caller
// receive handling (run from the game's main loop, once per frame)
if (!m_pSocket) {
return;
}
if (!m_pSocket->Check()) {
m_pSocket = NULL;
// the connection was lost
onConnectionAbort();
return;
}
// push any buffered outgoing data to the server
m_pSocket->Flush();
// drain incoming data: keep pulling messages out until the buffer is empty
while (true)
{
char buffer[_MAX_MSGSIZE] = { 0 };
int nSize = sizeof(buffer);
char* pbufMsg = buffer;
if(m_pSocket == NULL)
{
break;
}
if (!m_pSocket->ReceiveMsg(pbufMsg, nSize)) {
break;
}
while (true)
{
MsgHead* pReceiveMsg = (MsgHead*)(pbufMsg);
uint16 dwCurMsgSize = pReceiveMsg->usSize;
// CCLOG("msgsize: %d", dwCurMsgSize);
if((int)dwCurMsgSize > nSize || dwCurMsgSize <= 0) { // broken msg
break;
}
CMessageSubject::instance().OnMessage((const char*)pReceiveMsg, pReceiveMsg->usSize);
pbufMsg += dwCurMsgSize;
nSize -= dwCurMsgSize;
if(nSize <= 0) {
break;
}
}
}
Such a socket wrapper works on Windows, Mac, iOS, Android and similar platforms. The socket handling is asynchronous and non-blocking, so message processing can safely sit on the main thread, and the receive buffer holds up to 64 KB (in practice a single message rarely exceeds 3 KB).
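The dispatch loop above reads a MsgHead structure that the post never defines. A minimal definition consistent with that usage might look like the following sketch (only usSize is implied by the code; the second field is purely illustrative, and the project's uint16 typedef is assumed):

#pragma pack(push, 1)
struct MsgHead
{
uint16 usSize; // total size of the message in bytes, header included (little-endian on the wire)
uint16 usCmd;  // hypothetical message id; the real header may carry different fields
};
#pragma pack(pop)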
The point of showing this is not that the wrapper is especially brilliant or high-tech; quite the opposite, the point is how simple it is. This small wrapper is entirely adequate as the messaging layer of an MMO client (note: the client; a server has far higher performance requirements for its messaging layer), and even a large MMO of the World of Warcraft class could use something this small as its client-side message layer.
For a game client's messaging layer the requirements are very simple, and open-source libraries such as boost::asio are not needed at all:
1. A non-blocking model, so that message handling can safely run on the main thread. Handling messages on multiple threads is mostly a waste; only a truly enormous MMO would need it.
2. Buffered receiving, so that large messages are not truncated.
3. That is all. The remaining special cases, such as reconnecting after a dropped connection, belong in the upper-level logic (see the sketch below).
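As an example of such upper-layer handling, a dropped connection detected in the per-frame code above could be handled roughly like this (tryReconnect() is a hypothetical helper; onConnectionAbort() appears in the snippet above):

if (m_pSocket && !m_pSocket->Check()) {
m_pSocket->Destroy(); // release the socket and reset its buffers
m_pSocket = NULL;
onConnectionAbort(); // tell the game/UI that the connection dropped
}
if (m_pSocket == NULL) {
tryReconnect(); // e.g. create a new CGameSocket and call Create() again
}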
1. The Camera architecture consists of a client and a server, which communicate with each other through the Binder mechanism.
The Camera implementation has two main layers, native code and Java code:
Camera native framework:
frameworks/native/include/ui
frameworks/native/libs/ui
frameworks/av/camera/
The native Camera implementation lives in the directories above and is compiled into the libraries libui.so and libcamera_client.so.
Camera service:
frameworks/av/services/camera/libcameraservice
This part is compiled into libcameraservice.so.
Camera HAL:
frameworks/av/camera
frameworks/av/services/camera/libcameraservice/CameraHardwareInterface.h
CameraHardwareInterface.h defines the HAL interface; each platform implements it for its own hardware.
2. Android Camera uses a client/server architecture; the client and the server run independently of each other and communicate over Binder. The following walks through how Camera completes its initialization, from device boot to entering the camera application.
Since Camera communicates over Binder, its service must first be registered with ServiceManager so that clients can look it up later. Where does that happen? frameworks/av/media/mediaserver/main_mediaserver.cpp contains a main() function that registers the media services, and this is where CameraService registers itself:
int main(int argc, char** argv)
{
sp<ProcessState> proc(ProcessState::self());
sp<IServiceManager> sm = defaultServiceManager();
ALOGI("ServiceManager: %p", sm.get());
AudioFlinger::instantiate();
MediaPlayerService::instantiate();
CameraService::instantiate();
AudioPolicyService::instantiate();
ProcessState::self()->startThreadPool();
IPCThreadState::self()->joinThreadPool();
}
But if we look in the CameraService source we cannot find this instantiate() function. Where is it? Following the chain up to one of its parent classes, BinderService:
template<typename SERVICE>
class BinderService
{
public:
static status_t publish(bool allowIsolated = false) {
sp<IServiceManager> sm(defaultServiceManager());
return sm->addService(String16(SERVICE::getServiceName()), new SERVICE(), allowIsolated);
}
static void publishAndJoinThreadPool(bool allowIsolated = false) {
sp<IServiceManager> sm(defaultServiceManager());
sm->addService(String16(SERVICE::getServiceName()), new SERVICE(), allowIsolated);
ProcessState::self()->startThreadPool();
IPCThreadState::self()->joinThreadPool();
}
static void instantiate() { publish(); }
static status_t shutdown() {
return NO_ERROR;
}
};
As we can see, the registration of the CameraService happens in publish(). The SERVICE used there is explained in the source:
template<typename SERVICE>
SERVICE is a template parameter; since it is CameraService being registered here, it can simply be read as CameraService:
sm->addService(String16(SERVICE::getServiceName()), new SERVICE(), allowIsolated);
With that, the Camera service is registered with ServiceManager and is available to clients at any time.
main_mediaserver's main() is invoked by init.rc at startup, so the Camera service is registered, and ready for Binder communication, as soon as the device boots:
service media /system/bin/mediaserver
class main
user media
group audio camera inet net_bt net_bt_admin net_bw_acct drmrpc
ioprio rt 4
With the Binder service registered, let's see how the client connects to the server side and opens the camera module.
We start from the source of the testingcamera app. Its setUpCamera() function calls open(mCameraId), which enters the framework layer and invokes the open method of frameworks/base/core/java/android/hardware/Camera.java:
public static Camera open(int cameraId) {
return new Camera(cameraId);
}
This calls Camera's constructor, so let's look at the constructor next:
Camera(int cameraId) {
mShutterCallback = null;
mRawImageCallback = null;
mJpegCallback = null;
mPreviewCallback = null;
mPostviewCallback = null;
mZoomListener = null;
Looper looper;
if ((looper = Looper.myLooper()) != null) {
mEventHandler = new EventHandler(this, looper);
} else if ((looper = Looper.getMainLooper()) != null) {
mEventHandler = new EventHandler(this, looper);
} else {
mEventHandler = null;
}
native_setup(new WeakReference<Camera>(this), cameraId);
}
Now we finally reach JNI. Continuing into the camera JNI file, frameworks/base/core/jni/android_hardware_Camera.cpp:
// connect to camera service
static void android_hardware_Camera_native_setup(JNIEnv *env, jobject thiz,
jobject weak_this, jint cameraId)
{
sp<Camera> camera = Camera::connect(cameraId);
if (camera == NULL) {
jniThrowRuntimeException(env, "Fail to connect to camera service");
return;
}
// make sure camera hardware is alive
if (camera->getStatus() != NO_ERROR) {
jniThrowRuntimeException(env, "Camera initialization failed");
return;
}
jclass clazz = env->GetObjectClass(thiz);
if (clazz == NULL) {
jniThrowRuntimeException(env, "Can't find android/hardware/Camera");
return;
}
// We use a weak reference so the Camera object can be garbage collected.
// The reference is only used as a proxy for callbacks.
sp<JNICameraContext> context = new JNICameraContext(env, weak_this, clazz, camera);
context->incStrong(thiz);
camera->setListener(context);
// save context in opaque field
env->SetIntField(thiz, fields.context, (int)context.get());
}
Inside this JNI function we find the client side of the Camera client/server architecture: it calls connect() to send a connection request to the server. The JNICameraContext class is a listener used to handle the data and messages delivered by the lower-level Camera callbacks.
Let's see what the client-side connect() does:
===>>> frameworks/av/camera/Camera.cpp
sp<Camera> Camera::connect(int cameraId)
{
ALOGV("connect");
sp<Camera> c = new Camera();
const sp<ICameraService>& cs = getCameraService();
if (cs != 0) {
c->mCamera = cs->connect(c, cameraId);
}
if (c->mCamera != 0) {
c->mCamera->asBinder()->linkToDeath(c);
c->mStatus = NO_ERROR;
} else {
c.clear();
}
return c;
}
const sp<ICameraService>& cs = getCameraService(); obtains a handle to the Camera service via getCameraService():
// establish binder interface to camera service
const sp<ICameraService>& Camera::getCameraService()
{
Mutex::Autolock _l(mLock);
if (mCameraService.get() == 0) {
sp<IServiceManager> sm = defaultServiceManager();
sp<IBinder> binder;
do {
binder = sm->getService(String16("media.camera"));
if (binder != 0)
break;
ALOGW("CameraService not published, waiting...");
usleep(500000); // 0.5 s
} while(true);
if (mDeathNotifier == NULL) {
mDeathNotifier = new DeathNotifier();
}
binder->linkToDeath(mDeathNotifier);
mCameraService = interface_cast<ICameraService>(binder);
}
ALOGE_IF(mCameraService == 0, "no CameraService!?");
return mCameraService;
}
As we can see, the CameraService handle is obtained through Binder; from the way the Binder mechanism works, it refers to the CameraService instance registered earlier.
c->mCamera = cs->connect(c, cameraId);
This executes the server-side connect() and assigns the returned ICamera object to Camera's mCamera; what the server-side connect() actually returns is an instance of its inner class Client:
sp<ICamera> CameraService::connect(
const sp<ICameraClient>&cameraClient, int cameraId) {
int callingPid = getCallingPid();
LOG1("CameraService::connect E (pid %d, id %d)", callingPid, cameraId);
if(!mModule) {
ALOGE("Camera HAL module not loaded");
return NULL;
}
sp<Client> client;
if (cameraId < 0 || cameraId >= mNumberOfCameras) {
ALOGE("CameraService::connect X (pid %d) rejected (invalid cameraId %d).",
callingPid, cameraId);
return NULL;
}
char value[PROPERTY_VALUE_MAX];
property_get("sys.secpolicy.camera.disabled", value, "0");
if (strcmp(value, "1") == 0) {
// Camera is disabled by DevicePolicyManager.
ALOGI("Camera is disabled. connect X (pid %d) rejected", callingPid);
return NULL;
}
Mutex::Autolock lock(mServiceLock);
if (mClient[cameraId] != 0) {
client = mClient[cameraId].promote();
if (client != 0) {
if (cameraClient->asBinder() == client->getCameraClient()->asBinder()) {
LOG1("CameraService::connect X (pid %d) (the same client)",
callingPid);
return client;
} else {
ALOGW("CameraService::connect X (pid %d) rejected (existing client).",
callingPid);
return NULL;
}
}
mClient[cameraId].clear();
}
if (mBusy[cameraId]) {
ALOGW("CameraService::connect X (pid %d) rejected"
" (camera %d is still busy).", callingPid, cameraId);
return NULL;
}
struct camera_info info;
if(mModule->get_camera_info(cameraId, &info) != OK) {
ALOGE("Invalid camera id %d", cameraId);
return NULL;
}
int deviceVersion;
if (mModule->common.module_api_version == CAMERA_MODULE_API_VERSION_2_0) {
deviceVersion = info.device_version;
} else {
deviceVersion = CAMERA_DEVICE_API_VERSION_1_0;
}
switch(deviceVersion) {
case CAMERA_DEVICE_API_VERSION_1_0:
client = new CameraClient(this, cameraClient, cameraId,
info.facing, callingPid, getpid());
break;
case CAMERA_DEVICE_API_VERSION_2_0:
client = new Camera2Client(this, cameraClient, cameraId,
info.facing, callingPid, getpid());
break;
default:
ALOGE("Unknown camera device HAL version: %d", deviceVersion);
return NULL;
}
if(client->initialize(mModule) != OK) {
return NULL;
}
cameraClient->asBinder()->linkToDeath(this);
mClient[cameraId] = client;
LOG1("CameraService::connect X (id %d, this pid is %d)", cameraId, getpid());
return client;
}
In client->initialize(mModule) the Camera HAL interface object mHardware is instantiated; mHardware then calls initialize() to enter the HAL layer and open the camera driver:
status_t CameraClient::initialize(camera_module_t *module) {
int callingPid = getCallingPid();
LOG1("CameraClient::initialize E (pid %d, id %d)", callingPid, mCameraId);
char camera_device_name[10];
status_t res;
snprintf(camera_device_name, sizeof(camera_device_name), "%d", mCameraId);
mHardware = new CameraHardwareInterface(camera_device_name);
res = mHardware->initialize(&module->common);
if (res != OK) {
ALOGE("%s: Camera %d: unable to initialize device: %s(%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
mHardware.clear();
return NO_INIT;
}
mHardware->setCallbacks(notifyCallback,
dataCallback,
dataCallbackTimestamp,
(void *)mCameraId);
// Enable zoom, error, focus, and metadata messages by default
enableMsgType(CAMERA_MSG_ERROR | CAMERA_MSG_ZOOM | CAMERA_MSG_FOCUS |
CAMERA_MSG_PREVIEW_METADATA | CAMERA_MSG_FOCUS_MOVE);
LOG1("CameraClient::initialize X (pid %d, id %d)", callingPid, mCameraId);
return OK;
}
mHardware's initialize() enters the HAL layer and opens the camera driver:
status_t initialize(hw_module_t *module)
{
ALOGI("Opening camera %s", mName.string());
int rc = module->methods->open(module, mName.string(),
(hw_device_t**)&mDevice); // this call is what actually opens the underlying camera driver
if (rc != OK) {
ALOGE("Could not open camera %s: %d", mName.string(), rc);
return rc;
}
initHalPreviewWindow();
return rc;
}
In hardware->initialize(&mModule->common), the mModule passed down is a camera_module_t structure. How does it get initialized? CameraService contains the following function:
void CameraService::onFirstRef()
{
BnCameraService::onFirstRef();
if (hw_get_module(CAMERA_HARDWARE_MODULE_ID,
(const hw_module_t **)&mModule) < 0) {
ALOGE("Could not load camera HAL module");
mNumberOfCameras = 0;
}
else {
mNumberOfCameras = mModule->get_number_of_cameras();
if (mNumberOfCameras > MAX_CAMERAS) {
ALOGE("Number of cameras(%d) > MAX_CAMERAS(%d).",
mNumberOfCameras, MAX_CAMERAS);
mNumberOfCameras = MAX_CAMERAS;
}
for (int i = 0; i < mNumberOfCameras; i++) {
setCameraFree(i);
}
}
}
CameraService calls
hw_get_module(CAMERA_HARDWARE_MODULE_ID, (const hw_module_t **)&mModule)
to load the fake camera HAL module, camera.duck.so:
int hw_get_module(const char *id, const struct hw_module_t **module)
{
return hw_get_module_by_class(id, NULL, module);
}
int hw_get_module_by_class(const char *class_id, const char *inst,
const struct hw_module_t **module)
{
int status;
int i;
const struct hw_module_t *hmi = NULL;
char prop[PATH_MAX];
char path[PATH_MAX];
char name[PATH_MAX];
if (inst)
snprintf(name, PATH_MAX, "%s.%s", class_id, inst);
else
strlcpy(name, class_id, PATH_MAX);
/*
* Here we rely on the fact that calling dlopen multiple times on
* the same .so will simply increment a refcount (and not load
* a new copy of the library).
* We also assume that dlopen() is thread-safe.
*/
/* Loop through the configuration variants looking for a module */
for (i = 0; i < HAL_VARIANT_KEYS_COUNT + 1; i++) {
if (i < HAL_VARIANT_KEYS_COUNT) {
if(property_get(variant_keys[i], prop, NULL) == 0) {
continue;
}
snprintf(path, sizeof(path), "%s/%s.%s.so",
HAL_LIBRARY_PATH2, name, prop);
if(access(path, R_OK) == 0) break;
snprintf(path, sizeof(path), "%s/%s.%s.so",
HAL_LIBRARY_PATH1, name, prop);
if(access(path, R_OK) == 0) break;
} else {
snprintf(path, sizeof(path), "%s/%s.default.so",
HAL_LIBRARY_PATH1, name);
if(access(path, R_OK) == 0) break;
}
}
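For the camera module this loop ends up probing paths like the ones below (assuming the usual values HAL_LIBRARY_PATH1 = /system/lib/hw and HAL_LIBRARY_PATH2 = /vendor/lib/hw, and a device property such as ro.hardware set to duck; the rest of the function, which loads the first path found, is not shown in the post):

/vendor/lib/hw/camera.duck.so      // the first existing path wins
/system/lib/hw/camera.duck.so
...                                // same pattern for ro.product.board, ro.board.platform, ro.arch
/system/lib/hw/camera.default.so   // final fallback when no property variant matches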
The source of the camera.duck.so module lives under development/tools/emulator/system/camera/:
====> development/tools/emulator/system/camera/Android.mk
LOCAL_SRC_FILES := \
EmulatedCameraHal.cpp \
EmulatedCameraFactory.cpp \
EmulatedBaseCamera.cpp \
EmulatedCamera.cpp \
EmulatedCameraDevice.cpp \
EmulatedQemuCamera.cpp \
EmulatedQemuCameraDevice.cpp \
EmulatedFakeCamera.cpp \
EmulatedFakeCameraDevice.cpp \
Converters.cpp \
PreviewWindow.cpp \
CallbackNotifier.cpp \
QemuClient.cpp \
JpegCompressor.cpp \
EmulatedCamera2.cpp \
EmulatedFakeCamera2.cpp \
EmulatedQemuCamera2.cpp \
fake-pipeline2/Scene.cpp \
fake-pipeline2/Sensor.cpp \
fake-pipeline2/JpegCompressor.cpp
ifeq ($(TARGET_PRODUCT),vbox_x86)
LOCAL_MODULE := camera.vbox_x86
else
LOCAL_MODULE := camera.duck
endif
Anyone familiar with the HAL layer knows that hw_get_module() is what fetches a module's HAL stub. Here it uses CAMERA_HARDWARE_MODULE_ID to obtain the Camera HAL stub and stores it in mModule; from then on the Camera module is controlled through mModule. So when is onFirstRef() actually called?
onFirstRef() comes from the parent class RefBase; it is invoked when a strong pointer (sp) first takes a reference on the object, in other words the first time the object is wrapped in an sp. So when does that happen for Camera? It happens when the client initiates the connection:
sp<Camera> Camera::connect(int cameraId)
{
LOGV("connect");
sp<Camera> c = new Camera();
const sp<ICameraService>& cs = getCameraService();
}
At this point a CameraService handle is obtained and wrapped in an sp, the strong reference count increases, and the corresponding CameraService instance's onFirstRef() gets called as a result.
When CameraService::connect() returns the Client instance, the connection between the client and the server is established. Camera initialization is complete, and operations such as preview and taking pictures can proceed.