added linux makefile

This commit is contained in:
Colin Sherratt
2014-05-26 03:42:23 -04:00
parent cb5a46cd58
commit 6b733ba3a3
141 changed files with 53997 additions and 6 deletions


@@ -0,0 +1,73 @@
/************************************************************************************
Filename : CAPI_DistortionRenderer.cpp
Content : Combines all of the rendering state associated with the HMD
Created : February 2, 2014
Authors : Michael Antonov
Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved.
Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License");
you may not use the Oculus VR Rift SDK except in compliance with the License,
which is provided at the time of installation or download, or which
otherwise accompanies this software in either electronic or hard copy form.
You may obtain a copy of the License at
http://www.oculusvr.com/licenses/LICENSE-3.1
Unless required by applicable law or agreed to in writing, the Oculus VR SDK
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
************************************************************************************/
#include "CAPI_DistortionRenderer.h"
#if defined (OVR_OS_WIN32)
// TBD: Move to separate config file that handles back-ends.
#define OVR_D3D_VERSION 11
#include "D3D1X/CAPI_D3D1X_DistortionRenderer.h"
#undef OVR_D3D_VERSION
#define OVR_D3D_VERSION 10
#include "D3D1X/CAPI_D3D1X_DistortionRenderer.h"
#undef OVR_D3D_VERSION
#define OVR_D3D_VERSION 9
#include "D3D1X/CAPI_D3D9_DistortionRenderer.h"
#undef OVR_D3D_VERSION
#endif
#include "GL/CAPI_GL_DistortionRenderer.h"
namespace OVR { namespace CAPI {
//-------------------------------------------------------------------------------------
// ***** DistortionRenderer
// TBD: Move to separate config file that handles back-ends.
DistortionRenderer::CreateFunc DistortionRenderer::APICreateRegistry[ovrRenderAPI_Count] =
{
0, // None
&GL::DistortionRenderer::Create,
0, // Android_GLES
#if defined (OVR_OS_WIN32)
&D3D9::DistortionRenderer::Create,
&D3D10::DistortionRenderer::Create,
&D3D11::DistortionRenderer::Create
#else
0,
0,
0
#endif
};
}} // namespace OVR::CAPI
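
To make the dispatch concrete, here is a minimal sketch (illustrative only, not part of this commit) of how a back end is created through the registry above; api, hmd, timeManager, and renderState are assumed to come from the surrounding CAPI setup code:

DistortionRenderer::CreateFunc create = DistortionRenderer::APICreateRegistry[api];
// Registry entries are 0 for back ends not compiled on this platform.
DistortionRenderer* renderer = (create != 0) ? create(hmd, timeManager, renderState) : 0;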


@@ -0,0 +1,118 @@
/************************************************************************************
Filename : CAPI_DistortionRenderer.h
Content : Abstract interface for platform-specific rendering of distortion
Created : February 2, 2014
Authors : Michael Antonov
Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved.
Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License");
you may not use the Oculus VR Rift SDK except in compliance with the License,
which is provided at the time of installation or download, or which
otherwise accompanies this software in either electronic or hard copy form.
You may obtain a copy of the License at
http://www.oculusvr.com/licenses/LICENSE-3.1
Unless required by applicable law or agreed to in writing, the Oculus VR SDK
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
************************************************************************************/
#ifndef OVR_CAPI_DistortionRenderer_h
#define OVR_CAPI_DistortionRenderer_h
#include "CAPI_HMDRenderState.h"
#include "CAPI_FrameTimeManager.h"
namespace OVR { namespace CAPI {
//-------------------------------------------------------------------------------------
// ***** CAPI::DistortionRenderer
// DistortionRenderer implements rendering of distortion and other overlay elements
// in a platform-independent way.
// Platform-specific renderer back ends for CAPI are derived from this class.
class DistortionRenderer : public RefCountBase<DistortionRenderer>
{
// Quiet assignment compiler warning.
void operator = (const DistortionRenderer&) { }
public:
DistortionRenderer(ovrRenderAPIType api, ovrHmd hmd,
FrameTimeManager& timeManager,
const HMDRenderState& renderState)
: RenderAPI(api), HMD(hmd), TimeManager(timeManager), RState(renderState)
{ }
virtual ~DistortionRenderer()
{ }
// Configures the Renderer based on externally passed API settings. Must be
// called before use.
// Under D3D, apiConfig includes the D3D device pointer, back buffer, and other
// needed structures.
virtual bool Initialize(const ovrRenderAPIConfig* apiConfig,
unsigned distortionCaps) = 0;
// Submits one eye texture for rendering. This is in a separate method to
// allow "submit as you render" scenarios on horizontal screens where one
// eye can be scanned out before the other.
virtual void SubmitEye(int eyeId, ovrTexture* eyeTexture) = 0;
// Finish the frame, optionally swapping buffers.
// Many implementations may actually apply the distortion here.
virtual void EndFrame(bool swapBuffers, unsigned char* latencyTesterDrawColor,
unsigned char* latencyTester2DrawColor) = 0;
// Stores the current graphics pipeline state so it can be restored later.
void SaveGraphicsState() { if (!(RState.EnabledHmdCaps & ovrHmdCap_NoRestore)) GfxState->Save(); }
// Restores the saved graphics pipeline state.
void RestoreGraphicsState() { if (!(RState.EnabledHmdCaps & ovrHmdCap_NoRestore)) GfxState->Restore(); }
// *** Creation Factory logic
ovrRenderAPIType GetRenderAPI() const { return RenderAPI; }
// Creation function for this interface, registered for API.
typedef DistortionRenderer* (*CreateFunc)(ovrHmd hmd,
FrameTimeManager &timeManager,
const HMDRenderState& renderState);
static CreateFunc APICreateRegistry[ovrRenderAPI_Count];
protected:
class GraphicsState : public RefCountBase<GraphicsState>
{
public:
GraphicsState() : IsValid(false) {}
virtual ~GraphicsState() {}
virtual void Save() = 0;
virtual void Restore() = 0;
protected:
bool IsValid;
};
const ovrRenderAPIType RenderAPI;
const ovrHmd HMD;
FrameTimeManager& TimeManager;
const HMDRenderState& RState;
Ptr<GraphicsState> GfxState;
};
}} // namespace OVR::CAPI
#endif // OVR_CAPI_DistortionRenderer_h
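
A typical call sequence against this interface, sketched for orientation (renderer, apiConfig, distortionCaps, and eyeTextures are placeholders for the client's own setup; this is not prescriptive SDK usage):

renderer->Initialize(apiConfig, distortionCaps); // once, before first use
renderer->SaveGraphicsState();                   // no-op when ovrHmdCap_NoRestore is set
renderer->SubmitEye(0, &eyeTextures[0]);
renderer->SubmitEye(1, &eyeTextures[1]);
renderer->EndFrame(true, NULL, NULL);            // swap buffers; no latency-test colors
renderer->RestoreGraphicsState();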


@@ -0,0 +1,675 @@
/************************************************************************************
Filename : CAPI_FrameTimeManager.cpp
Content : Manage frame timing and pose prediction for rendering
Created : November 30, 2013
Authors : Volga Aksoy, Michael Antonov
Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved.
Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License");
you may not use the Oculus VR Rift SDK except in compliance with the License,
which is provided at the time of installation or download, or which
otherwise accompanies this software in either electronic or hard copy form.
You may obtain a copy of the License at
http://www.oculusvr.com/licenses/LICENSE-3.1
Unless required by applicable law or agreed to in writing, the Oculus VR SDK
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
************************************************************************************/
#include "CAPI_FrameTimeManager.h"
namespace OVR { namespace CAPI {
//-------------------------------------------------------------------------------------
// ***** FrameLatencyTracker
FrameLatencyTracker::FrameLatencyTracker()
{
Reset();
}
void FrameLatencyTracker::Reset()
{
TrackerEnabled = true;
WaitMode = SampleWait_Zeroes;
FrameIndex = 0;
MatchCount = 0;
RenderLatencySeconds = 0.0;
TimewarpLatencySeconds = 0.0;
FrameDeltas.Clear();
}
unsigned char FrameLatencyTracker::GetNextDrawColor()
{
if (!TrackerEnabled || (WaitMode == SampleWait_Zeroes) ||
(FrameIndex >= FramesTracked))
{
return (unsigned char)Util::FrameTimeRecord::ReadbackIndexToColor(0);
}
OVR_ASSERT(FrameIndex < FramesTracked);
return (unsigned char)Util::FrameTimeRecord::ReadbackIndexToColor(FrameIndex+1);
}
void FrameLatencyTracker::SaveDrawColor(unsigned char drawColor, double endFrameTime,
double renderIMUTime, double timewarpIMUTime )
{
if (!TrackerEnabled || (WaitMode == SampleWait_Zeroes))
return;
if (FrameIndex < FramesTracked)
{
OVR_ASSERT(Util::FrameTimeRecord::ReadbackIndexToColor(FrameIndex+1) == drawColor);
OVR_UNUSED(drawColor);
// saves {color, endFrame time}
FrameEndTimes[FrameIndex].ReadbackIndex = FrameIndex + 1;
FrameEndTimes[FrameIndex].TimeSeconds = endFrameTime;
FrameEndTimes[FrameIndex].RenderIMUTimeSeconds = renderIMUTime;
FrameEndTimes[FrameIndex].TimewarpIMUTimeSeconds= timewarpIMUTime;
FrameEndTimes[FrameIndex].MatchedRecord = false;
FrameIndex++;
}
else
{
// If the request was outstanding for too long, switch to zero mode to restart.
if (endFrameTime > (FrameEndTimes[FrameIndex-1].TimeSeconds + 0.15))
{
if (MatchCount == 0)
{
// If nothing was matched, we have no latency reading.
RenderLatencySeconds = 0.0;
TimewarpLatencySeconds = 0.0;
}
WaitMode = SampleWait_Zeroes;
MatchCount = 0;
FrameIndex = 0;
}
}
}
void FrameLatencyTracker::MatchRecord(const Util::FrameTimeRecordSet &r)
{
if (!TrackerEnabled)
return;
if (WaitMode == SampleWait_Zeroes)
{
// Do we have all zeros?
if (r.IsAllZeroes())
{
OVR_ASSERT(FrameIndex == 0);
WaitMode = SampleWait_Match;
MatchCount = 0;
}
return;
}
// We are in Match Mode. Wait until all colors are matched or timeout,
// at which point we go back to zeros.
for (int i = 0; i < FrameIndex; i++)
{
int recordIndex = 0;
int consecutiveMatch = 0;
OVR_ASSERT(FrameEndTimes[i].ReadbackIndex != 0);
if (r.FindReadbackIndex(&recordIndex, FrameEndTimes[i].ReadbackIndex))
{
// Advance forward to see that we have several more matches.
int ri = recordIndex + 1;
int j = i + 1;
consecutiveMatch++;
for (; (j < FrameIndex) && (ri < Util::FrameTimeRecordSet::RecordCount); j++, ri++)
{
if (r[ri].ReadbackIndex != FrameEndTimes[j].ReadbackIndex)
break;
consecutiveMatch++;
}
// Match at least 2 items in a row, to avoid accidentally matching a color.
if (consecutiveMatch > 1)
{
// Record latency values for all but last samples. Keep last 2 samples
// for the future to simplify matching.
for (int q = 0; q < consecutiveMatch; q++)
{
const Util::FrameTimeRecord &scanoutFrame = r[recordIndex+q];
FrameTimeRecordEx &renderFrame = FrameEndTimes[i+q];
if (!renderFrame.MatchedRecord)
{
double deltaSeconds = scanoutFrame.TimeSeconds - renderFrame.TimeSeconds;
if (deltaSeconds > 0.0)
{
FrameDeltas.AddTimeDelta(deltaSeconds);
LatencyRecordTime = scanoutFrame.TimeSeconds;
RenderLatencySeconds = scanoutFrame.TimeSeconds - renderFrame.RenderIMUTimeSeconds;
TimewarpLatencySeconds = (renderFrame.TimewarpIMUTimeSeconds == 0.0) ? 0.0 :
(scanoutFrame.TimeSeconds - renderFrame.TimewarpIMUTimeSeconds);
}
renderFrame.MatchedRecord = true;
MatchCount++;
}
}
// Exit for.
break;
}
}
} // for ( i => FrameIndex )
// If we matched all frames, start over.
if (MatchCount == FramesTracked)
{
WaitMode = SampleWait_Zeroes;
MatchCount = 0;
FrameIndex = 0;
}
}
void FrameLatencyTracker::GetLatencyTimings(float latencies[3])
{
if (ovr_GetTimeInSeconds() > (LatencyRecordTime + 2.0))
{
latencies[0] = 0.0f;
latencies[1] = 0.0f;
latencies[2] = 0.0f;
}
else
{
latencies[0] = (float)RenderLatencySeconds;
latencies[1] = (float)TimewarpLatencySeconds;
latencies[2] = (float)FrameDeltas.GetMedianTimeDelta();
}
}
//-------------------------------------------------------------------------------------
FrameTimeManager::FrameTimeManager(bool vsyncEnabled)
: VsyncEnabled(vsyncEnabled), DynamicPrediction(true), SdkRender(false),
FrameTiming()
{
RenderIMUTimeSeconds = 0.0;
TimewarpIMUTimeSeconds = 0.0;
// HACK: SyncToScanoutDelay is observed to be close to 1 frame on video cards.
// Overwritten by dynamic latency measurement on DK2.
VSyncToScanoutDelay = 0.013f;
NoVSyncToScanoutDelay = 0.004f;
}
void FrameTimeManager::Init(HmdRenderInfo& renderInfo)
{
// Set up prediction distances.
// With-Vsync timings.
RenderInfo = renderInfo;
ScreenSwitchingDelay = RenderInfo.Shutter.PixelSettleTime * 0.5f +
RenderInfo.Shutter.PixelPersistence * 0.5f;
}
void FrameTimeManager::ResetFrameTiming(unsigned frameIndex,
bool dynamicPrediction,
bool sdkRender)
{
DynamicPrediction = dynamicPrediction;
SdkRender = sdkRender;
FrameTimeDeltas.Clear();
DistortionRenderTimes.Clear();
ScreenLatencyTracker.Reset();
FrameTiming.FrameIndex = frameIndex;
FrameTiming.NextFrameTime = 0.0;
FrameTiming.ThisFrameTime = 0.0;
FrameTiming.Inputs.FrameDelta = calcFrameDelta();
FrameTiming.Inputs.ScreenDelay = calcScreenDelay();
FrameTiming.Inputs.TimewarpWaitDelta = 0.0f;
LocklessTiming.SetState(FrameTiming);
}
double FrameTimeManager::calcFrameDelta() const
{
// The timing difference between frames is tracked by FrameTimeDeltas, or
// falls back to a hard-coded value of 1/FrameRate.
double frameDelta;
if (!VsyncEnabled)
{
frameDelta = 0.0;
}
else if (FrameTimeDeltas.GetCount() > 3)
{
frameDelta = FrameTimeDeltas.GetMedianTimeDelta();
if (frameDelta > (RenderInfo.Shutter.VsyncToNextVsync + 0.001))
frameDelta = RenderInfo.Shutter.VsyncToNextVsync;
}
else
{
frameDelta = RenderInfo.Shutter.VsyncToNextVsync;
}
return frameDelta;
}
double FrameTimeManager::calcScreenDelay() const
{
double screenDelay = ScreenSwitchingDelay;
double measuredVSyncToScanout;
// Use the real-time DK2 latency tester HW for prediction if it is working.
// Sanity-check that the measured value is under 60 ms.
if (!VsyncEnabled)
{
screenDelay += NoVSyncToScanoutDelay;
}
else if ( DynamicPrediction &&
(ScreenLatencyTracker.FrameDeltas.GetCount() > 3) &&
(measuredVSyncToScanout = ScreenLatencyTracker.FrameDeltas.GetMedianTimeDelta(),
(measuredVSyncToScanout > 0.0001) && (measuredVSyncToScanout < 0.06)) )
{
screenDelay += measuredVSyncToScanout;
}
else
{
screenDelay += VSyncToScanoutDelay;
}
return screenDelay;
}
double FrameTimeManager::calcTimewarpWaitDelta() const
{
// If timewarp timing hasn't been calculated, we should wait.
if (!VsyncEnabled)
return 0.0;
if (SdkRender)
{
if (NeedDistortionTimeMeasurement())
return 0.0;
return -(DistortionRenderTimes.GetMedianTimeDelta() + 0.002);
}
// Just a hard-coded "high" value for game-drawn code.
// TBD: Just return 0 and let users calculate this themselves?
return -0.003;
}
void FrameTimeManager::Timing::InitTimingFromInputs(const FrameTimeManager::TimingInputs& inputs,
HmdShutterTypeEnum shutterType,
double thisFrameTime, unsigned int frameIndex)
{
// ThisFrameTime comes from the end of the last frame, unless it changed.
double nextFrameBase;
double frameDelta = inputs.FrameDelta;
FrameIndex = frameIndex;
ThisFrameTime = thisFrameTime;
NextFrameTime = ThisFrameTime + frameDelta;
nextFrameBase = NextFrameTime + inputs.ScreenDelay;
MidpointTime = nextFrameBase + frameDelta * 0.5;
TimewarpPointTime = (inputs.TimewarpWaitDelta == 0.0) ?
0.0 : (NextFrameTime + inputs.TimewarpWaitDelta);
// Calculate absolute points in time when eye rendering or corresponding time-warp
// screen edges will become visible.
// This only matters with VSync.
switch(shutterType)
{
case HmdShutter_RollingTopToBottom:
EyeRenderTimes[0] = MidpointTime;
EyeRenderTimes[1] = MidpointTime;
TimeWarpStartEndTimes[0][0] = nextFrameBase;
TimeWarpStartEndTimes[0][1] = nextFrameBase + frameDelta;
TimeWarpStartEndTimes[1][0] = nextFrameBase;
TimeWarpStartEndTimes[1][1] = nextFrameBase + frameDelta;
break;
case HmdShutter_RollingLeftToRight:
EyeRenderTimes[0] = nextFrameBase + frameDelta * 0.25;
EyeRenderTimes[1] = nextFrameBase + frameDelta * 0.75;
/*
// TBD: MA: It is probably better if mesh sets it up per-eye.
// Would apply if screen is 0 -> 1 for each eye mesh
TimeWarpStartEndTimes[0][0] = nextFrameBase;
TimeWarpStartEndTimes[0][1] = MidpointTime;
TimeWarpStartEndTimes[1][0] = MidpointTime;
TimeWarpStartEndTimes[1][1] = nextFrameBase + frameDelta;
*/
// Mesh is set up to vary from edge of screen 0 -> 1 across both eyes
TimeWarpStartEndTimes[0][0] = nextFrameBase;
TimeWarpStartEndTimes[0][1] = nextFrameBase + frameDelta;
TimeWarpStartEndTimes[1][0] = nextFrameBase;
TimeWarpStartEndTimes[1][1] = nextFrameBase + frameDelta;
break;
case HmdShutter_RollingRightToLeft:
EyeRenderTimes[0] = nextFrameBase + frameDelta * 0.75;
EyeRenderTimes[1] = nextFrameBase + frameDelta * 0.25;
// This is *Correct* with Tom's distortion mesh organization.
TimeWarpStartEndTimes[0][0] = nextFrameBase ;
TimeWarpStartEndTimes[0][1] = nextFrameBase + frameDelta;
TimeWarpStartEndTimes[1][0] = nextFrameBase ;
TimeWarpStartEndTimes[1][1] = nextFrameBase + frameDelta;
break;
case HmdShutter_Global:
// TBD
EyeRenderTimes[0] = MidpointTime;
EyeRenderTimes[1] = MidpointTime;
TimeWarpStartEndTimes[0][0] = MidpointTime;
TimeWarpStartEndTimes[0][1] = MidpointTime;
TimeWarpStartEndTimes[1][0] = MidpointTime;
TimeWarpStartEndTimes[1][1] = MidpointTime;
break;
default:
break;
}
}
double FrameTimeManager::BeginFrame(unsigned frameIndex)
{
RenderIMUTimeSeconds = 0.0;
TimewarpIMUTimeSeconds = 0.0;
// ThisFrameTime comes from the end of the last frame, unless it changed.
double thisFrameTime = (FrameTiming.NextFrameTime != 0.0) ?
FrameTiming.NextFrameTime : ovr_GetTimeInSeconds();
// We are starting to process a new frame...
FrameTiming.InitTimingFromInputs(FrameTiming.Inputs, RenderInfo.Shutter.Type,
thisFrameTime, frameIndex);
return FrameTiming.ThisFrameTime;
}
void FrameTimeManager::EndFrame()
{
// Record timing since last frame; must be called after Present & sync.
FrameTiming.NextFrameTime = ovr_GetTimeInSeconds();
if (FrameTiming.ThisFrameTime > 0.0)
{
FrameTimeDeltas.AddTimeDelta(FrameTiming.NextFrameTime - FrameTiming.ThisFrameTime);
FrameTiming.Inputs.FrameDelta = calcFrameDelta();
}
// Write to Lock-less
LocklessTiming.SetState(FrameTiming);
}
// Thread-safe function to query timing for a future frame
FrameTimeManager::Timing FrameTimeManager::GetFrameTiming(unsigned frameIndex)
{
Timing frameTiming = LocklessTiming.GetState();
if (frameTiming.ThisFrameTime == 0.0)
{
// If timing hasn't been initialized, starting based on "now" is the best guess.
frameTiming.InitTimingFromInputs(frameTiming.Inputs, RenderInfo.Shutter.Type,
ovr_GetTimeInSeconds(), frameIndex);
}
else if (frameIndex > frameTiming.FrameIndex)
{
unsigned frameDelta = frameIndex - frameTiming.FrameIndex;
double thisFrameTime = frameTiming.NextFrameTime +
double(frameDelta-1) * frameTiming.Inputs.FrameDelta;
// Don't run away too far into the future beyond rendering.
OVR_ASSERT(frameDelta < 6);
frameTiming.InitTimingFromInputs(frameTiming.Inputs, RenderInfo.Shutter.Type,
thisFrameTime, frameIndex);
}
return frameTiming;
}
double FrameTimeManager::GetEyePredictionTime(ovrEyeType eye)
{
if (VsyncEnabled)
{
return FrameTiming.EyeRenderTimes[eye];
}
// No VSync: Best guess for the near future
return ovr_GetTimeInSeconds() + ScreenSwitchingDelay + NoVSyncToScanoutDelay;
}
Transformf FrameTimeManager::GetEyePredictionPose(ovrHmd hmd, ovrEyeType eye)
{
double eyeRenderTime = GetEyePredictionTime(eye);
ovrSensorState eyeState = ovrHmd_GetSensorState(hmd, eyeRenderTime);
// EyeRenderPoses[eye] = eyeState.Predicted.Pose;
// Record view pose sampling time for Latency reporting.
if (RenderIMUTimeSeconds == 0.0)
RenderIMUTimeSeconds = eyeState.Recorded.TimeInSeconds;
return eyeState.Predicted.Pose;
}
void FrameTimeManager::GetTimewarpPredictions(ovrEyeType eye, double timewarpStartEnd[2])
{
if (VsyncEnabled)
{
timewarpStartEnd[0] = FrameTiming.TimeWarpStartEndTimes[eye][0];
timewarpStartEnd[1] = FrameTiming.TimeWarpStartEndTimes[eye][1];
return;
}
// Free-running, so this will be displayed immediately.
// Unfortunately we have no idea which bit of the screen is actually going to be displayed.
// TODO: guess which bit of the screen is being displayed!
// (e.g. use DONOTWAIT on present and see when the return isn't WASSTILLWAITING?)
// We have no idea where scan-out is currently, so we can't usefully warp the screen spatially.
timewarpStartEnd[0] = ovr_GetTimeInSeconds() + ScreenSwitchingDelay + NoVSyncToScanoutDelay;
timewarpStartEnd[1] = timewarpStartEnd[0];
}
void FrameTimeManager::GetTimewarpMatrices(ovrHmd hmd, ovrEyeType eyeId,
ovrPosef renderPose, ovrMatrix4f twmOut[2])
{
if (!hmd)
{
return;
}
double timewarpStartEnd[2] = { 0.0, 0.0 };
GetTimewarpPredictions(eyeId, timewarpStartEnd);
ovrSensorState startState = ovrHmd_GetSensorState(hmd, timewarpStartEnd[0]);
ovrSensorState endState = ovrHmd_GetSensorState(hmd, timewarpStartEnd[1]);
if (TimewarpIMUTimeSeconds == 0.0)
TimewarpIMUTimeSeconds = startState.Recorded.TimeInSeconds;
Quatf quatFromStart = startState.Predicted.Pose.Orientation;
Quatf quatFromEnd = endState.Predicted.Pose.Orientation;
Quatf quatFromEye = renderPose.Orientation; //EyeRenderPoses[eyeId].Orientation;
quatFromEye.Invert();
Quatf timewarpStartQuat = quatFromEye * quatFromStart;
Quatf timewarpEndQuat = quatFromEye * quatFromEnd;
Matrix4f timewarpStart(timewarpStartQuat);
Matrix4f timewarpEnd(timewarpEndQuat);
// The real-world orientations have: X=right, Y=up, Z=backwards.
// The vectors inside the mesh are in NDC to keep the shader simple: X=right, Y=down, Z=forwards.
// So we need to perform a similarity transform on this delta matrix.
// The verbose code would look like this:
/*
Matrix4f matBasisChange;
matBasisChange.SetIdentity();
matBasisChange.M[0][0] = 1.0f;
matBasisChange.M[1][1] = -1.0f;
matBasisChange.M[2][2] = -1.0f;
Matrix4f matBasisChangeInv = matBasisChange.Inverted();
matRenderFromNow = matBasisChangeInv * matRenderFromNow * matBasisChange;
*/
// ...but of course all the above is a constant transform and much more easily done.
// We flip the signs of the Y&Z row, then flip the signs of the Y&Z column,
// and of course most of the flips cancel:
// +++ +-- +--
// +++ -> flip Y&Z columns -> +-- -> flip Y&Z rows -> -++
// +++ +-- -++
timewarpStart.M[0][1] = -timewarpStart.M[0][1];
timewarpStart.M[0][2] = -timewarpStart.M[0][2];
timewarpStart.M[1][0] = -timewarpStart.M[1][0];
timewarpStart.M[2][0] = -timewarpStart.M[2][0];
timewarpEnd .M[0][1] = -timewarpEnd .M[0][1];
timewarpEnd .M[0][2] = -timewarpEnd .M[0][2];
timewarpEnd .M[1][0] = -timewarpEnd .M[1][0];
timewarpEnd .M[2][0] = -timewarpEnd .M[2][0];
twmOut[0] = timewarpStart;
twmOut[1] = timewarpEnd;
}
// Used by renderer to determine if it should time distortion rendering.
bool FrameTimeManager::NeedDistortionTimeMeasurement() const
{
if (!VsyncEnabled)
return false;
return DistortionRenderTimes.GetCount() < 10;
}
void FrameTimeManager::AddDistortionTimeMeasurement(double distortionTimeSeconds)
{
DistortionRenderTimes.AddTimeDelta(distortionTimeSeconds);
// If timewarp timing changes based on this sample, update it.
double newTimewarpWaitDelta = calcTimewarpWaitDelta();
if (newTimewarpWaitDelta != FrameTiming.Inputs.TimewarpWaitDelta)
{
FrameTiming.Inputs.TimewarpWaitDelta = newTimewarpWaitDelta;
LocklessTiming.SetState(FrameTiming);
}
}
void FrameTimeManager::UpdateFrameLatencyTrackingAfterEndFrame(
unsigned char frameLatencyTestColor,
const Util::FrameTimeRecordSet& rs)
{
// FrameTiming.NextFrameTime in this context (after EndFrame) is the end frame time.
ScreenLatencyTracker.SaveDrawColor(frameLatencyTestColor,
FrameTiming.NextFrameTime,
RenderIMUTimeSeconds,
TimewarpIMUTimeSeconds);
ScreenLatencyTracker.MatchRecord(rs);
// If screen delay changed, update timing.
double newScreenDelay = calcScreenDelay();
if (newScreenDelay != FrameTiming.Inputs.ScreenDelay)
{
FrameTiming.Inputs.ScreenDelay = newScreenDelay;
LocklessTiming.SetState(FrameTiming);
}
}
//-----------------------------------------------------------------------------------
// ***** TimeDeltaCollector
void TimeDeltaCollector::AddTimeDelta(double timeSeconds)
{
// avoid adding invalid timing values
if(timeSeconds < 0.0f)
return;
if (Count == Capacity)
{
for(int i=0; i< Count-1; i++)
TimeBufferSeconds[i] = TimeBufferSeconds[i+1];
Count--;
}
TimeBufferSeconds[Count++] = timeSeconds;
}
double TimeDeltaCollector::GetMedianTimeDelta() const
{
double SortedList[Capacity];
bool used[Capacity];
memset(used, 0, sizeof(used));
SortedList[0] = 0.0; // In case Count was 0...
// Probably the slowest way to find median...
for (int i=0; i<Count; i++)
{
double smallestDelta = 1000000.0;
int index = 0;
for (int j = 0; j < Count; j++)
{
if (!used[j])
{
if (TimeBufferSeconds[j] < smallestDelta)
{
smallestDelta = TimeBufferSeconds[j];
index = j;
}
}
}
// Mark as used
used[index] = true;
SortedList[i] = smallestDelta;
}
return SortedList[Count/2];
}
}} // namespace OVR::CAPI
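
Pulling these pieces together, the intended per-frame flow looks roughly like this (a sketch assuming SDK-side distortion rendering; hmd, hmdDesc, frameIndex, and recordSet are placeholders):

double frameTime = timeManager.BeginFrame(frameIndex); // can seed game simulation time
unsigned char drawColor = timeManager.GetFrameLatencyTestDrawColor();
for (int eyeNum = 0; eyeNum < 2; eyeNum++)
{
    ovrEyeType eye  = hmdDesc.EyeRenderOrder[eyeNum];
    Transformf pose = timeManager.GetEyePredictionPose(hmd, eye);
    // ... render the eye scene with pose, then the distortion pass ...
}
// ... Present / buffer swap ...
timeManager.EndFrame();
timeManager.UpdateFrameLatencyTrackingAfterEndFrame(drawColor, recordSet);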


@@ -0,0 +1,264 @@
/************************************************************************************
Filename : CAPI_FrameTimeManager.h
Content : Manage frame timing and pose prediction for rendering
Created : November 30, 2013
Authors : Volga Aksoy, Michael Antonov
Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved.
Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License");
you may not use the Oculus VR Rift SDK except in compliance with the License,
which is provided at the time of installation or download, or which
otherwise accompanies this software in either electronic or hard copy form.
You may obtain a copy of the License at
http://www.oculusvr.com/licenses/LICENSE-3.1
Unless required by applicable law or agreed to in writing, the Oculus VR SDK
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
************************************************************************************/
#ifndef OVR_CAPI_FrameTimeManager_h
#define OVR_CAPI_FrameTimeManager_h
#include "../OVR_CAPI.h"
#include "../Kernel/OVR_Timer.h"
#include "../Kernel/OVR_Math.h"
#include "../Util/Util_Render_Stereo.h"
#include "../Util/Util_LatencyTest2.h"
namespace OVR { namespace CAPI {
//-------------------------------------------------------------------------------------
// Helper class that collects time deltas between frames and reports their
// median, so that we know how long to wait.
struct TimeDeltaCollector
{
TimeDeltaCollector() : Count(0) { }
void AddTimeDelta(double timeSeconds);
void Clear() { Count = 0; }
double GetMedianTimeDelta() const;
double GetCount() const { return Count; }
enum { Capacity = 12 };
private:
int Count;
double TimeBufferSeconds[Capacity];
};
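// For example, with buffered deltas of 16.6, 16.7, 33.4, 16.6 and 16.5 ms,
// GetMedianTimeDelta() returns 16.6 ms: a single dropped frame (the 33.4 ms
// outlier) does not skew the estimate the way a mean would.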
//-------------------------------------------------------------------------------------
// ***** FrameLatencyTracker
// FrameLatencyTracker tracks frame Present to display Scan-out timing, as reported by
// the DK2 internal latency tester pixel read-back. The computed value is used in
// FrameTimeManager for prediction. View Render and TimeWarp to scan-out latencies are
// also reported for debugging.
//
// The class operates by generating color values from GetNextDrawColor() that must
// be rendered on the back end, and then looking for the matching values in the
// FrameTimeRecordSet structure as reported by the HW.
class FrameLatencyTracker
{
public:
enum { FramesTracked = Util::LT2_IncrementCount-1 };
FrameLatencyTracker();
// DrawColor == 0 is special in that it doesn't need saving of a timestamp.
unsigned char GetNextDrawColor();
void SaveDrawColor(unsigned char drawColor, double endFrameTime,
double renderIMUTime, double timewarpIMUTime );
void MatchRecord(const Util::FrameTimeRecordSet &r);
void GetLatencyTimings(float latencies[3]);
void Reset();
public:
struct FrameTimeRecordEx : public Util::FrameTimeRecord
{
bool MatchedRecord;
double RenderIMUTimeSeconds;
double TimewarpIMUTimeSeconds;
};
// True if rendering read-back is enabled.
bool TrackerEnabled;
enum SampleWaitType {
SampleWait_Zeroes, // We are waiting for a record with all zeros.
SampleWait_Match // We are issuing & matching colors.
};
SampleWaitType WaitMode;
int MatchCount;
// Records of frame timings that we are trying to measure.
FrameTimeRecordEx FrameEndTimes[FramesTracked];
int FrameIndex;
// Median filter for (ScanoutTimeSeconds - PostPresent frame time)
TimeDeltaCollector FrameDeltas;
// Latency reporting results
double RenderLatencySeconds;
double TimewarpLatencySeconds;
double LatencyRecordTime;
};
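// Illustrative protocol sketch (not part of this header): tracker and
// recordSet are placeholders; the record set is read back from the DK2 HW.
//
//     unsigned char color = tracker.GetNextDrawColor();
//     // ... render the frame with the latency-test quad drawn in `color` ...
//     tracker.SaveDrawColor(color, endFrameTime, renderIMUTime, timewarpIMUTime);
//     tracker.MatchRecord(recordSet);
//     float latencies[3]; // render, timewarp, median frame delta
//     tracker.GetLatencyTimings(latencies);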
//-------------------------------------------------------------------------------------
// ***** FrameTimeManager
// FrameTimeManager keeps track of rendered frame timing and handles predictions for
// orientations and time-warp.
class FrameTimeManager
{
public:
FrameTimeManager(bool vsyncEnabled = true);
// Data that affects frame timing computation.
struct TimingInputs
{
// Hard-coded value or dynamic as reported by FrameTimeDeltas.GetMedianTimeDelta().
double FrameDelta;
// Screen delay from present to scan-out, as potentially reported by ScreenLatencyTracker.
double ScreenDelay;
// Negative value of how many seconds before EndFrame we start timewarp. 0.0 if not used.
double TimewarpWaitDelta;
TimingInputs()
: FrameDelta(0), ScreenDelay(0), TimewarpWaitDelta(0)
{ }
};
// Timing values for a specific frame.
struct Timing
{
TimingInputs Inputs;
// Index of a frame that started at ThisFrameTime.
unsigned int FrameIndex;
// Predicted absolute times for when this frame will show up on screen.
// Generally, all values will be >= NextFrameTime, since that's the time we expect next
// vsync to succeed.
double ThisFrameTime;
double TimewarpPointTime;
double NextFrameTime;
double MidpointTime;
double EyeRenderTimes[2];
double TimeWarpStartEndTimes[2][2];
Timing()
{
memset(this, 0, sizeof(Timing));
}
void InitTimingFromInputs(const TimingInputs& inputs, HmdShutterTypeEnum shutterType,
double thisFrameTime, unsigned int frameIndex);
};
// Called on startup to provide data on HMD timing.
void Init(HmdRenderInfo& renderInfo);
// Called with each new ConfigureRendering.
void ResetFrameTiming(unsigned frameIndex,
bool dynamicPrediction, bool sdkRender);
void SetVsync(bool enabled) { VsyncEnabled = enabled; }
// BeginFrame returns the time of the call.
// TBD: Should this be a predicted time value instead?
double BeginFrame(unsigned frameIndex);
void EndFrame();
// Thread-safe function to query timing for a future frame
Timing GetFrameTiming(unsigned frameIndex);
double GetEyePredictionTime(ovrEyeType eye);
Transformf GetEyePredictionPose(ovrHmd hmd, ovrEyeType eye);
void GetTimewarpPredictions(ovrEyeType eye, double timewarpStartEnd[2]);
void GetTimewarpMatrices(ovrHmd hmd, ovrEyeType eye, ovrPosef renderPose, ovrMatrix4f twmOut[2]);
// Used by renderer to determine if it should time distortion rendering.
bool NeedDistortionTimeMeasurement() const;
void AddDistortionTimeMeasurement(double distortionTimeSeconds);
// DK2 latency test interface
// Get the next draw color for the DK2 latency tester
unsigned char GetFrameLatencyTestDrawColor()
{ return ScreenLatencyTracker.GetNextDrawColor(); }
// Must be called after EndFrame() to update latency tester timings.
// Must pass the color reported by GetFrameLatencyTestDrawColor() for this frame.
void UpdateFrameLatencyTrackingAfterEndFrame(unsigned char frameLatencyTestColor,
const Util::FrameTimeRecordSet& rs);
void GetLatencyTimings(float latencies[3])
{ return ScreenLatencyTracker.GetLatencyTimings(latencies); }
const Timing& GetFrameTiming() const { return FrameTiming; }
private:
double calcFrameDelta() const;
double calcScreenDelay() const;
double calcTimewarpWaitDelta() const;
HmdRenderInfo RenderInfo;
// Timings are collected through a median filter, to avoid outliers.
TimeDeltaCollector FrameTimeDeltas;
TimeDeltaCollector DistortionRenderTimes;
FrameLatencyTracker ScreenLatencyTracker;
// Timing changes if we have no Vsync (all prediction is reduced to fixed interval).
bool VsyncEnabled;
// Set if we use the dynamically measured latency for prediction (DK2 latency tester).
bool DynamicPrediction;
// Set if the SDK is doing the rendering, so DistortionRenderTimes is valid.
bool SdkRender;
// Total frame delay due to VsyncToFirstScanline, persistence and settle time.
// Computed from RenderInfo.Shutter.
double VSyncToScanoutDelay;
double NoVSyncToScanoutDelay;
double ScreenSwitchingDelay;
// Current (or last) frame timing info. Used as a source for LocklessTiming.
Timing FrameTiming;
// TBD: Don't we need NextFrame here as well?
LocklessUpdater<Timing> LocklessTiming;
// IMU Read timings
double RenderIMUTimeSeconds;
double TimewarpIMUTimeSeconds;
};
}} // namespace OVR::CAPI
#endif // OVR_CAPI_FrameTimeManager_h


@@ -0,0 +1,142 @@
/************************************************************************************
Filename : CAPI_GlobalState.cpp
Content : Maintains global state of the CAPI
Created : January 24, 2014
Authors : Michael Antonov
Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved.
Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License");
you may not use the Oculus VR Rift SDK except in compliance with the License,
which is provided at the time of installation or download, or which
otherwise accompanies this software in either electronic or hard copy form.
You may obtain a copy of the License at
http://www.oculusvr.com/licenses/LICENSE-3.1
Unless required by applicable law or agreed to in writing, the Oculus VR SDK
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
************************************************************************************/
#include "CAPI_GlobalState.h"
namespace OVR { namespace CAPI {
//-------------------------------------------------------------------------------------
// Open Questions / Notes
// 2. Detect HMDs.
// Challenge: If we do everything through polling, it would imply we want all the devices
// initialized. However, there may be multiple rifts, extra sensors, etc,
// which shouldn't be allocated.
//
// How do you reset orientation Quaternion?
// Can you change IPD?
//-------------------------------------------------------------------------------------
// ***** OVRGlobalState
// Global instance
GlobalState* GlobalState::pInstance = 0;
GlobalState::GlobalState()
{
pManager = *DeviceManager::Create();
// Handle the DeviceManager's messages
pManager->AddMessageHandler( this );
EnumerateDevices();
// PhoneSensors::Init();
}
GlobalState::~GlobalState()
{
RemoveHandlerFromDevices();
OVR_ASSERT(HMDs.IsEmpty());
}
int GlobalState::EnumerateDevices()
{
// Need to use separate lock for device enumeration, as pManager->GetHandlerLock()
// would produce deadlocks here.
Lock::Locker lock(&EnumerationLock);
EnumeratedDevices.Clear();
DeviceEnumerator<HMDDevice> e = pManager->EnumerateDevices<HMDDevice>();
while(e.IsAvailable())
{
EnumeratedDevices.PushBack(DeviceHandle(e));
e.Next();
}
return (int)EnumeratedDevices.GetSize();
}
HMDDevice* GlobalState::CreateDevice(int index)
{
Lock::Locker lock(&EnumerationLock);
if (index >= (int)EnumeratedDevices.GetSize())
return 0;
return EnumeratedDevices[index].CreateDeviceTyped<HMDDevice>();
}
void GlobalState::AddHMD(HMDState* hmd)
{
Lock::Locker lock(pManager->GetHandlerLock());
HMDs.PushBack(hmd);
}
void GlobalState::RemoveHMD(HMDState* hmd)
{
Lock::Locker lock(pManager->GetHandlerLock());
hmd->RemoveNode();
}
void GlobalState::NotifyHMDs_AddDevice(DeviceType deviceType)
{
Lock::Locker lock(pManager->GetHandlerLock());
for(HMDState* hmd = HMDs.GetFirst(); !HMDs.IsNull(hmd); hmd = hmd->pNext)
hmd->NotifyAddDevice(deviceType);
}
void GlobalState::OnMessage(const Message& msg)
{
if (msg.Type == Message_DeviceAdded || msg.Type == Message_DeviceRemoved)
{
if (msg.pDevice == pManager)
{
const MessageDeviceStatus& statusMsg =
static_cast<const MessageDeviceStatus&>(msg);
if (msg.Type == Message_DeviceAdded)
{
//LogText("OnMessage DeviceAdded.\n");
// We may have added a sensor/other device; notify any HMDs that might
// need it to check for it later.
NotifyHMDs_AddDevice(statusMsg.Handle.GetType());
}
else
{
//LogText("OnMessage DeviceRemoved.\n");
}
}
}
}
}} // namespace OVR::CAPI
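
A hedged sketch of how this global state is typically driven (illustrative; per the header, the singleton itself is created by ovr_Initialize(), and wrapping a device in an HMDState happens elsewhere):

GlobalState* state = GlobalState::pInstance; // created by ovr_Initialize()
int detected = state->EnumerateDevices();
for (int i = 0; i < detected; i++)
{
    HMDDevice* device = state->CreateDevice(i); // returns 0 if the index is stale
    // ... wrap device in an HMDState, whose constructor calls AddHMD(this) ...
}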


@@ -0,0 +1,84 @@
/************************************************************************************
Filename : CAPI_GlobalState.h
Content : Maintains global state of the CAPI
Created : January 24, 2013
Authors : Michael Antonov
Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved.
Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License");
you may not use the Oculus VR Rift SDK except in compliance with the License,
which is provided at the time of installation or download, or which
otherwise accompanies this software in either electronic or hard copy form.
You may obtain a copy of the License at
http://www.oculusvr.com/licenses/LICENSE-3.1
Unless required by applicable law or agreed to in writing, the Oculus VR SDK
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
************************************************************************************/
#ifndef OVR_CAPI_GlobalState_h
#define OVR_CAPI_GlobalState_h
#include "../OVR_CAPI.h"
#include "../OVR_Device.h"
#include "../Kernel/OVR_Timer.h"
#include "../Kernel/OVR_Math.h"
#include "CAPI_HMDState.h"
namespace OVR { namespace CAPI {
//-------------------------------------------------------------------------------------
// ***** OVRGlobalState
// Global DeviceManager state - singleton instance of this is created
// by ovr_Initialize().
class GlobalState : public MessageHandler, public NewOverrideBase
{
public:
GlobalState();
~GlobalState();
static GlobalState *pInstance;
int EnumerateDevices();
HMDDevice* CreateDevice(int index);
// MessageHandler implementation
void OnMessage(const Message& msg);
// Helpers used to keep track of HMDs and notify them of sensor changes.
void AddHMD(HMDState* hmd);
void RemoveHMD(HMDState* hmd);
void NotifyHMDs_AddDevice(DeviceType deviceType);
const char* GetLastError()
{
return 0;
}
DeviceManager* GetManager() { return pManager; }
protected:
Ptr<DeviceManager> pManager;
Lock EnumerationLock;
Array<DeviceHandle> EnumeratedDevices;
// Currently created hmds; protected by Manager lock.
List<HMDState> HMDs;
};
}} // namespace OVR::CAPI
#endif


@@ -0,0 +1,143 @@
/************************************************************************************
Filename : CAPI_HMDRenderState.cpp
Content : Combines all of the rendering state associated with the HMD
Created : February 2, 2014
Authors : Michael Antonov
Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved.
Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License");
you may not use the Oculus VR Rift SDK except in compliance with the License,
which is provided at the time of installation or download, or which
otherwise accompanies this software in either electronic or hard copy form.
You may obtain a copy of the License at
http://www.oculusvr.com/licenses/LICENSE-3.1
Unless required by applicable law or agreed to in writing, the Oculus VR SDK
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
************************************************************************************/
#include "CAPI_HMDRenderState.h"
namespace OVR { namespace CAPI {
//-------------------------------------------------------------------------------------
// ***** HMDRenderState
HMDRenderState::HMDRenderState(ovrHmd hmd, Profile* userProfile, const OVR::HMDInfo& hmdInfo)
: HMD(hmd), HMDInfo(hmdInfo)
{
RenderInfo = GenerateHmdRenderInfoFromHmdInfo( HMDInfo, userProfile );
Distortion[0] = CalculateDistortionRenderDesc(StereoEye_Left, RenderInfo, 0);
Distortion[1] = CalculateDistortionRenderDesc(StereoEye_Right, RenderInfo, 0);
ClearColor[0] = ClearColor[1] = ClearColor[2] = ClearColor[3] = 0.0f;
EnabledHmdCaps = 0;
}
HMDRenderState::~HMDRenderState()
{
}
ovrHmdDesc HMDRenderState::GetDesc()
{
ovrHmdDesc d;
memset(&d, 0, sizeof(d));
d.Type = ovrHmd_Other;
d.ProductName = HMDInfo.ProductName;
d.Manufacturer = HMDInfo.Manufacturer;
d.Resolution.w = HMDInfo.ResolutionInPixels.w;
d.Resolution.h = HMDInfo.ResolutionInPixels.h;
d.WindowsPos.x = HMDInfo.DesktopX;
d.WindowsPos.y = HMDInfo.DesktopY;
d.DisplayDeviceName = HMDInfo.DisplayDeviceName;
d.DisplayId = HMDInfo.DisplayId;
d.HmdCaps = ovrHmdCap_Present | ovrHmdCap_NoVSync;
d.SensorCaps = ovrSensorCap_YawCorrection | ovrSensorCap_Orientation;
d.DistortionCaps = ovrDistortionCap_Chromatic | ovrDistortionCap_TimeWarp | ovrDistortionCap_Vignette;
if (strstr(HMDInfo.ProductName, "DK1"))
{
d.Type = ovrHmd_DK1;
}
else if (strstr(HMDInfo.ProductName, "DK2"))
{
d.Type = ovrHmd_DK2;
d.HmdCaps |= ovrHmdCap_LowPersistence |
ovrHmdCap_LatencyTest | ovrHmdCap_DynamicPrediction;
d.SensorCaps |= ovrSensorCap_Position;
}
DistortionRenderDesc& leftDistortion = Distortion[0];
DistortionRenderDesc& rightDistortion = Distortion[1];
// The suggested FOV (assuming eye rotation)
d.DefaultEyeFov[0] = CalculateFovFromHmdInfo(StereoEye_Left, leftDistortion, RenderInfo, OVR_DEFAULT_EXTRA_EYE_ROTATION);
d.DefaultEyeFov[1] = CalculateFovFromHmdInfo(StereoEye_Right, rightDistortion, RenderInfo, OVR_DEFAULT_EXTRA_EYE_ROTATION);
// FOV extended across the entire screen
d.MaxEyeFov[0] = GetPhysicalScreenFov(StereoEye_Left, leftDistortion);
d.MaxEyeFov[1] = GetPhysicalScreenFov(StereoEye_Right, rightDistortion);
if (HMDInfo.Shutter.Type == HmdShutter_RollingRightToLeft)
{
d.EyeRenderOrder[0] = ovrEye_Right;
d.EyeRenderOrder[1] = ovrEye_Left;
}
else
{
d.EyeRenderOrder[0] = ovrEye_Left;
d.EyeRenderOrder[1] = ovrEye_Right;
}
return d;
}
ovrSizei HMDRenderState::GetFOVTextureSize(int eye, ovrFovPort fov, float pixelsPerDisplayPixel)
{
OVR_ASSERT((unsigned)eye < 2);
StereoEye seye = (eye == ovrEye_Left) ? StereoEye_Left : StereoEye_Right;
return CalculateIdealPixelSize(seye, Distortion[eye], fov, pixelsPerDisplayPixel);
}
ovrEyeRenderDesc HMDRenderState::calcRenderDesc(ovrEyeType eyeType, const ovrFovPort& fov)
{
HmdRenderInfo& hmdri = RenderInfo;
StereoEye eye = (eyeType == ovrEye_Left) ? StereoEye_Left : StereoEye_Right;
ovrEyeRenderDesc e0;
e0.Eye = eyeType;
e0.Fov = fov;
e0.ViewAdjust = CalculateEyeVirtualCameraOffset(hmdri, eye, false);
e0.DistortedViewport = GetFramebufferViewport(eye, hmdri);
e0.PixelsPerTanAngleAtCenter = Distortion[0].PixelsPerTanAngleAtCenter;
return e0;
}
void HMDRenderState::setupRenderDesc( ovrEyeRenderDesc eyeRenderDescOut[2],
const ovrFovPort eyeFovIn[2] )
{
eyeRenderDescOut[0] = EyeRenderDesc[0] = calcRenderDesc(ovrEye_Left, eyeFovIn[0]);
eyeRenderDescOut[1] = EyeRenderDesc[1] = calcRenderDesc(ovrEye_Right, eyeFovIn[1]);
}
}} // namespace OVR::CAPI
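
To show how clients consume this state, a brief sketch (illustrative; renderState stands for a constructed HMDRenderState):

ovrHmdDesc desc = renderState.GetDesc();
// Size a left-eye render target at 1:1 pixel density at the view center.
ovrSizei texSize = renderState.GetFOVTextureSize(ovrEye_Left, desc.DefaultEyeFov[0], 1.0f);
// Fill both eye render descriptions from the chosen FOVs.
ovrEyeRenderDesc eyeDesc[2];
renderState.setupRenderDesc(eyeDesc, desc.DefaultEyeFov);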


@@ -0,0 +1,93 @@
/************************************************************************************
Filename : CAPI_HMDRenderState.h
Content : Combines all of the rendering state associated with the HMD
Created : February 2, 2014
Authors : Michael Antonov
Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved.
Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License");
you may not use the Oculus VR Rift SDK except in compliance with the License,
which is provided at the time of installation or download, or which
otherwise accompanies this software in either electronic or hard copy form.
You may obtain a copy of the License at
http://www.oculusvr.com/licenses/LICENSE-3.1
Unless required by applicable law or agreed to in writing, the Oculus VR SDK
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
************************************************************************************/
#ifndef OVR_CAPI_HMDRenderState_h
#define OVR_CAPI_HMDRenderState_h
#include "../OVR_CAPI.h"
#include "../Kernel/OVR_Math.h"
#include "../Util/Util_Render_Stereo.h"
namespace OVR { namespace CAPI {
using namespace OVR::Util::Render;
//-------------------------------------------------------------------------------------
// ***** HMDRenderState
// Combines all of the rendering setup information about one HMD.
class HMDRenderState : public NewOverrideBase
{
// Quiet assignment compiler warning.
void operator = (const HMDRenderState&) { }
public:
HMDRenderState(ovrHmd hmd, Profile* userProfile, const OVR::HMDInfo& hmdInfo);
virtual ~HMDRenderState();
// *** Rendering Setup
// Delegated access APIs
ovrHmdDesc GetDesc();
ovrSizei GetFOVTextureSize(int eye, ovrFovPort fov, float pixelsPerDisplayPixel);
ovrEyeRenderDesc calcRenderDesc(ovrEyeType eyeType, const ovrFovPort& fov);
void setupRenderDesc(ovrEyeRenderDesc eyeRenderDescOut[2],
const ovrFovPort eyeFovIn[2]);
public:
// HMDInfo shouldn't change, as its string pointers are passed out.
ovrHmd HMD;
const OVR::HMDInfo& HMDInfo;
//const char* pLastError;
HmdRenderInfo RenderInfo;
DistortionRenderDesc Distortion[2];
ovrEyeRenderDesc EyeRenderDesc[2];
// Clear color used for distortion
float ClearColor[4];
// Pose at which the eye was last rendered, as submitted by EndEyeRender.
ovrPosef EyeRenderPoses[2];
// Capabilities passed to Configure.
unsigned EnabledHmdCaps;
unsigned DistortionCaps;
};
}} // namespace OVR::CAPI
#endif // OVR_CAPI_HMDRenderState_h


@@ -0,0 +1,804 @@
/************************************************************************************
Filename : CAPI_HMDState.cpp
Content : State associated with a single HMD
Created : January 24, 2014
Authors : Michael Antonov
Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved.
Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License");
you may not use the Oculus VR Rift SDK except in compliance with the License,
which is provided at the time of installation or download, or which
otherwise accompanies this software in either electronic or hard copy form.
You may obtain a copy of the License at
http://www.oculusvr.com/licenses/LICENSE-3.1
Unless required by applicable law or agreed to in writing, the Oculus VR SDK
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
************************************************************************************/
#include "CAPI_HMDState.h"
#include "CAPI_GlobalState.h"
#include "../OVR_Profile.h"
namespace OVR { namespace CAPI {
//-------------------------------------------------------------------------------------
// ***** HMDState
HMDState::HMDState(HMDDevice* device)
: pHMD(device), HMDInfoW(device), HMDInfo(HMDInfoW.h),
EnabledHmdCaps(0), HmdCapsAppliedToSensor(0),
SensorStarted(0), SensorCreated(0), SensorCaps(0),
AddSensorCount(0), AddLatencyTestCount(0), AddLatencyTestDisplayCount(0),
RenderState(getThis(), pHMD->GetProfile(), HMDInfoW.h),
LastFrameTimeSeconds(0.0f), LastGetFrameTimeSeconds(0.0),
LatencyTestActive(false),
LatencyTest2Active(false)
{
pLastError = 0;
GlobalState::pInstance->AddHMD(this);
// Should be in renderer?
TimeManager.Init(RenderState.RenderInfo);
EyeRenderActive[0] = false;
EyeRenderActive[1] = false;
LatencyTestDrawColor[0] = 0;
LatencyTestDrawColor[1] = 0;
LatencyTestDrawColor[2] = 0;
OVR_CAPI_VISION_CODE( pPoseTracker = 0; )
RenderingConfigured = false;
BeginFrameCalled = false;
BeginFrameThreadId = 0;
BeginFrameTimingCalled = false;
}
HMDState::HMDState(ovrHmdType hmdType)
: pHMD(0), HMDInfoW(hmdType), HMDInfo(HMDInfoW.h),
EnabledHmdCaps(0),
SensorStarted(0), SensorCreated(0), SensorCaps(0),
AddSensorCount(0), AddLatencyTestCount(0), AddLatencyTestDisplayCount(0),
RenderState(getThis(), 0, HMDInfoW.h), // No profile.
LastFrameTimeSeconds(0.0), LastGetFrameTimeSeconds(0.0)
{
// TBD: We should probably be looking up the default profile for the given
// device type + user.
pLastError = 0;
GlobalState::pInstance->AddHMD(this);
// Should be in renderer?
TimeManager.Init(RenderState.RenderInfo);
EyeRenderActive[0] = false;
EyeRenderActive[1] = false;
OVR_CAPI_VISION_CODE( pPoseTracker = 0; )
RenderingConfigured = false;
BeginFrameCalled = false;
BeginFrameThreadId = 0;
BeginFrameTimingCalled = false;
}
HMDState::~HMDState()
{
OVR_ASSERT(GlobalState::pInstance);
StopSensor();
ConfigureRendering(0,0,0,0);
OVR_CAPI_VISION_CODE( OVR_ASSERT(pPoseTracker == 0); )
GlobalState::pInstance->RemoveHMD(this);
}
//-------------------------------------------------------------------------------------
// *** Sensor
bool HMDState::StartSensor(unsigned supportedCaps, unsigned requiredCaps)
{
Lock::Locker lockScope(&DevicesLock);
bool crystalCoveOrBetter = (HMDInfo.HmdType == HmdType_CrystalCoveProto) ||
(HMDInfo.HmdType == HmdType_DK2);
bool sensorCreatedJustNow = false;
// TBD: In case of sensor not being immediately available, it would be good to check
// yaw config availability to match it with ovrHmdCap_YawCorrection requirement.
//
if (!crystalCoveOrBetter)
{
if (requiredCaps & ovrSensorCap_Position)
{
pLastError = "ovrSensorCap_Position not supported on this HMD.";
return false;
}
}
supportedCaps |= requiredCaps;
if (pHMD && !pSensor)
{
// Zero AddSensorCount before creation, in case it fails (or succeeds but then
// immediately gets disconnected) followed by another Add notification.
AddSensorCount = 0;
pSensor = *pHMD->GetSensor();
sensorCreatedJustNow= true;
if (pSensor)
{
pSensor->SetReportRate(500);
SFusion.AttachToSensor(pSensor);
applyProfileToSensorFusion();
}
else
{
if (requiredCaps & ovrSensorCap_Orientation)
{
pLastError = "Failed to create sensor.";
return false;
}
}
}
if ((requiredCaps & ovrSensorCap_YawCorrection) && !pSensor->IsMagCalibrated())
{
pLastError = "ovrHmdCap_YawCorrection not available.";
if (sensorCreatedJustNow)
{
SFusion.AttachToSensor(0);
SFusion.Reset();
pSensor.Clear();
}
return false;
}
SFusion.SetYawCorrectionEnabled((supportedCaps & ovrSensorCap_YawCorrection) != 0);
if (pSensor && sensorCreatedJustNow)
{
LogText("Sensor created.\n");
SensorCreated = true;
}
updateDK2FeaturesTiedToSensor(sensorCreatedJustNow);
#ifdef OVR_CAPI_VISIONSUPPORT
if (crystalCoveOrBetter && (supportedCaps & ovrSensorCap_Position))
{
if (!pPoseTracker)
{
pPoseTracker = new Vision::PoseTracker(SFusion);
if (pPoseTracker)
{
pPoseTracker->AssociateHMD(pSensor);
LogText("Sensor Pose tracker created.\n");
}
}
// TBD: How do we verify that position tracking is actually available
// i.e. camera is plugged in?
}
else if (pPoseTracker)
{
// TBD: Internals not thread safe - must fix!!
delete pPoseTracker;
pPoseTracker = 0;
LogText("Sensor Pose tracker destroyed.\n");
}
#endif // OVR_CAPI_VISIONSUPPORT
SensorCaps = supportedCaps;
SensorStarted = true;
return true;
}
// Stops sensor sampling, shutting down internal resources.
void HMDState::StopSensor()
{
Lock::Locker lockScope(&DevicesLock);
if (SensorStarted)
{
#ifdef OVR_CAPI_VISIONSUPPORT
if (pPoseTracker)
{
// TBD: Internals not thread safe - must fix!!
delete pPoseTracker;
pPoseTracker = 0;
LogText("Sensor Pose tracker destroyed.\n");
}
#endif // OVR_CAPI_VISIONSUPPORT
SFusion.AttachToSensor(0);
SFusion.Reset();
pSensor.Clear();
HmdCapsAppliedToSensor = 0;
AddSensorCount = 0;
SensorCaps = 0;
SensorCreated = false;
SensorStarted = false;
LogText("StopSensor succeeded.\n");
}
}
// Resets sensor orientation.
void HMDState::ResetSensor()
{
SFusion.Reset();
}
// Returns prediction for time.
ovrSensorState HMDState::PredictedSensorState(double absTime)
{
SensorState ss;
// We are trying to keep this path lockless unless we are notified of new device
// creation while not having a sensor yet. It's ok to check the SensorCreated
// volatile flag here, since GetSensorStateAtTime() is internally lockless and safe.
if (SensorCreated || checkCreateSensor())
{
ss = SFusion.GetSensorStateAtTime(absTime);
if (!(ss.StatusFlags & ovrStatus_OrientationTracked))
{
Lock::Locker lockScope(&DevicesLock);
#ifdef OVR_CAPI_VISIONSUPPORT
if (pPoseTracker)
{
// TBD: Internals not thread safe - must fix!!
delete pPoseTracker;
pPoseTracker = 0;
LogText("Sensor Pose tracker destroyed.\n");
}
#endif // OVR_CAPI_VISIONSUPPORT
// Not needed yet; SFusion.AttachToSensor(0);
// This seems to reset orientation anyway...
pSensor.Clear();
SensorCreated = false;
HmdCapsAppliedToSensor = 0;
}
}
else
{
// SensorState() defaults to 0s.
// ss.Pose.Orientation = Quatf();
// ..
// John:
// We still want valid times so frames will get a delta-time
// and allow operation with a joypad when the sensor isn't
// connected.
ss.Recorded.TimeInSeconds = absTime;
ss.Predicted.TimeInSeconds = absTime;
}
ss.StatusFlags |= ovrStatus_HmdConnected;
return ss;
}
bool HMDState::checkCreateSensor()
{
if (!(SensorStarted && !SensorCreated && AddSensorCount))
return false;
Lock::Locker lockScope(&DevicesLock);
// Re-check condition once in the lock, in case the state changed.
if (SensorStarted && !SensorCreated && AddSensorCount)
{
if (pHMD)
{
AddSensorCount = 0;
pSensor = *pHMD->GetSensor();
}
if (pSensor)
{
pSensor->SetReportRate(500);
SFusion.AttachToSensor(pSensor);
SFusion.SetYawCorrectionEnabled((SensorCaps & ovrSensorCap_YawCorrection) != 0);
applyProfileToSensorFusion();
#ifdef OVR_CAPI_VISIONSUPPORT
if (SensorCaps & ovrSensorCap_Position)
{
pPoseTracker = new Vision::PoseTracker(SFusion);
if (pPoseTracker)
{
pPoseTracker->AssociateHMD(pSensor);
}
LogText("Sensor Pose tracker created.\n");
}
#endif // OVR_CAPI_VISIONSUPPORT
LogText("Sensor created.\n");
SensorCreated = true;
return true;
}
}
return SensorCreated;
}
bool HMDState::GetSensorDesc(ovrSensorDesc* descOut)
{
Lock::Locker lockScope(&DevicesLock);
if (SensorCreated)
{
OVR_ASSERT(pSensor);
OVR::SensorInfo si;
pSensor->GetDeviceInfo(&si);
descOut->VendorId = si.VendorId;
descOut->ProductId = si.ProductId;
OVR_ASSERT(si.SerialNumber.GetSize() <= sizeof(descOut->SerialNumber));
OVR_strcpy(descOut->SerialNumber, sizeof(descOut->SerialNumber), si.SerialNumber.ToCStr());
return true;
}
return false;
}
void HMDState::applyProfileToSensorFusion()
{
if (!pHMD)
return;
Profile* profile = pHMD->GetProfile();
if (!profile)
{
OVR_ASSERT(false);
return;
}
SFusion.SetUserHeadDimensions ( *profile, RenderState.RenderInfo );
}
void HMDState::updateLowPersistenceMode(bool lowPersistence) const
{
OVR_ASSERT(pSensor);
DisplayReport dr;
if (pSensor.GetPtr())
{
pSensor->GetDisplayReport(&dr);
dr.Persistence = (UInt16) (dr.TotalRows * (lowPersistence ? 0.18f : 1.0f));
dr.Brightness = lowPersistence ? 255 : 0;
pSensor->SetDisplayReport(dr);
}
}
void HMDState::updateLatencyTestForHmd(bool latencyTesting)
{
if (pSensor.GetPtr())
{
DisplayReport dr;
pSensor->GetDisplayReport(&dr);
dr.ReadPixel = latencyTesting;
pSensor->SetDisplayReport(dr);
}
if (latencyTesting)
{
LatencyUtil2.SetSensorDevice(pSensor.GetPtr());
}
else
{
LatencyUtil2.SetSensorDevice(NULL);
}
}
void HMDState::updateDK2FeaturesTiedToSensor(bool sensorCreatedJustNow)
{
Lock::Locker lockScope(&DevicesLock);
if (!SensorCreated || (HMDInfo.HmdType != HmdType_DK2))
return;
// Only send display reports if state changed or the sensor is initializing for the first time.
if (sensorCreatedJustNow ||
((HmdCapsAppliedToSensor ^ EnabledHmdCaps) & ovrHmdCap_LowPersistence))
{
updateLowPersistenceMode((EnabledHmdCaps & ovrHmdCap_LowPersistence) ? true : false);
}
if (sensorCreatedJustNow || ((HmdCapsAppliedToSensor ^ EnabledHmdCaps) & ovrHmdCap_LatencyTest))
{
updateLatencyTestForHmd((EnabledHmdCaps & ovrHmdCap_LatencyTest) != 0);
}
HmdCapsAppliedToSensor = EnabledHmdCaps & (ovrHmdCap_LowPersistence|ovrHmdCap_LatencyTest);
}
void HMDState::SetEnabledHmdCaps(unsigned hmdCaps)
{
if (HMDInfo.HmdType == HmdType_DK2)
{
if ((EnabledHmdCaps ^ hmdCaps) & ovrHmdCap_DynamicPrediction)
{
// DynamicPrediction change
TimeManager.ResetFrameTiming(TimeManager.GetFrameTiming().FrameIndex,
(hmdCaps & ovrHmdCap_DynamicPrediction) ? true : false,
RenderingConfigured);
}
}
if ((EnabledHmdCaps ^ hmdCaps) & ovrHmdCap_NoVSync)
{
TimeManager.SetVsync((hmdCaps & ovrHmdCap_NoVSync) ? false : true);
}
EnabledHmdCaps = hmdCaps & ovrHmdCap_Writable_Mask;
RenderState.EnabledHmdCaps = EnabledHmdCaps;
// Unfortunately, LowPersistence and other flags are tied to the sensor.
// These flags are applied immediately if the sensor is created; otherwise their
// application is delayed until StartSensor.
// Such behavior is less than ideal, but should be resolved with the service model.
updateDK2FeaturesTiedToSensor(false);
}
//-------------------------------------------------------------------------------------
// ***** Property Access
// TBD: This all needs to be cleaned up and organized into namespaces.
float HMDState::getFloatValue(const char* propertyName, float defaultVal)
{
if (OVR_strcmp(propertyName, "LensSeparation") == 0)
{
return HMDInfo.LensSeparationInMeters;
}
else if (OVR_strcmp(propertyName, "CenterPupilDepth") == 0)
{
return SFusion.GetCenterPupilDepth();
}
else if (pHMD)
{
Profile* p = pHMD->GetProfile();
if (p)
{
return p->GetFloatValue(propertyName, defaultVal);
}
}
return defaultVal;
}
bool HMDState::setFloatValue(const char* propertyName, float value)
{
if (OVR_strcmp(propertyName, "CenterPupilDepth") == 0)
{
SFusion.SetCenterPupilDepth(value);
return true;
}
return false;
}
static unsigned CopyFloatArrayWithLimit(float dest[], unsigned destSize,
float source[], unsigned sourceSize)
{
unsigned count = Alg::Min(destSize, sourceSize);
for (unsigned i = 0; i < count; i++)
dest[i] = source[i];
return count;
}
unsigned HMDState::getFloatArray(const char* propertyName, float values[], unsigned arraySize)
{
if (arraySize)
{
if (OVR_strcmp(propertyName, "ScreenSize") == 0)
{
float data[2] = { HMDInfo.ScreenSizeInMeters.w, HMDInfo.ScreenSizeInMeters.h };
return CopyFloatArrayWithLimit(values, arraySize, data, 2);
}
else if (OVR_strcmp(propertyName, "DistortionClearColor") == 0)
{
return CopyFloatArrayWithLimit(values, arraySize, RenderState.ClearColor, 4);
}
else if (OVR_strcmp(propertyName, "DK2Latency") == 0)
{
if (HMDInfo.HmdType != HmdType_DK2)
return 0;
float data[3];
TimeManager.GetLatencyTimings(data);
return CopyFloatArrayWithLimit(values, arraySize, data, 3);
}
/*
else if (OVR_strcmp(propertyName, "CenterPupilDepth") == 0)
{
if (arraySize >= 1)
{
values[0] = SFusion.GetCenterPupilDepth();
return 1;
}
return 0;
} */
else if (pHMD)
{
Profile* p = pHMD->GetProfile();
// TBD: Not quite right. Should update profile interface, so that
// we can return 0 in all conditions if property doesn't exist.
if (p)
{
unsigned count = p->GetFloatValues(propertyName, values, arraySize);
return count;
}
}
}
return 0;
}
bool HMDState::setFloatArray(const char* propertyName, float values[], unsigned arraySize)
{
if (!arraySize)
return false;
if (OVR_strcmp(propertyName, "DistortionClearColor") == 0)
{
CopyFloatArrayWithLimit(RenderState.ClearColor, 4, values, arraySize);
return true;
}
return false;
}
const char* HMDState::getString(const char* propertyName, const char* defaultVal)
{
if (pHMD)
{
// For now, just access the profile.
Profile* p = pHMD->GetProfile();
LastGetStringValue[0] = 0;
if (p && p->GetValue(propertyName, LastGetStringValue, sizeof(LastGetStringValue)))
{
return LastGetStringValue;
}
}
return defaultVal;
}
//-------------------------------------------------------------------------------------
// *** Latency Test
bool HMDState::ProcessLatencyTest(unsigned char rgbColorOut[3])
{
bool result = false;
// Check create.
if (pLatencyTester)
{
if (pLatencyTester->IsConnected())
{
Color colorToDisplay;
LatencyUtil.ProcessInputs();
result = LatencyUtil.DisplayScreenColor(colorToDisplay);
rgbColorOut[0] = colorToDisplay.R;
rgbColorOut[1] = colorToDisplay.G;
rgbColorOut[2] = colorToDisplay.B;
}
else
{
// Disconnect.
LatencyUtil.SetDevice(NULL);
pLatencyTester = 0;
LogText("LATENCY SENSOR disconnected.\n");
}
}
else if (AddLatencyTestCount > 0)
{
// This might have some unlikely race condition issue which could cause us to miss a device...
AddLatencyTestCount = 0;
pLatencyTester = *GlobalState::pInstance->GetManager()->
EnumerateDevices<LatencyTestDevice>().CreateDevice();
if (pLatencyTester)
{
LatencyUtil.SetDevice(pLatencyTester);
LogText("LATENCY TESTER connected\n");
}
}
return result;
}
void HMDState::ProcessLatencyTest2(unsigned char rgbColorOut[3], double startTime)
{
// Check create.
if (!(EnabledHmdCaps & ovrHmdCap_LatencyTest))
return;
if (pLatencyTesterDisplay && !LatencyUtil2.HasDisplayDevice())
{
if (!pLatencyTesterDisplay->IsConnected())
{
LatencyUtil2.SetDisplayDevice(NULL);
}
}
else if (AddLatencyTestDisplayCount > 0)
{
// This might have some unlikely race condition issue
// which could cause us to miss a device...
AddLatencyTestDisplayCount = 0;
pLatencyTesterDisplay = *GlobalState::pInstance->GetManager()->
EnumerateDevices<LatencyTestDevice>().CreateDevice();
if (pLatencyTesterDisplay)
{
LatencyUtil2.SetDisplayDevice(pLatencyTesterDisplay);
}
}
if (LatencyUtil2.HasDevice() && pSensor && pSensor->IsConnected())
{
LatencyUtil2.BeginTest(startTime);
Color colorToDisplay;
LatencyTest2Active = LatencyUtil2.DisplayScreenColor(colorToDisplay);
rgbColorOut[0] = colorToDisplay.R;
rgbColorOut[1] = colorToDisplay.G;
rgbColorOut[2] = colorToDisplay.B;
}
else
{
LatencyTest2Active = false;
}
}
//-------------------------------------------------------------------------------------
// *** Rendering
bool HMDState::ConfigureRendering(ovrEyeRenderDesc eyeRenderDescOut[2],
const ovrFovPort eyeFovIn[2],
const ovrRenderAPIConfig* apiConfig,
unsigned distortionCaps)
{
ThreadChecker::Scope checkScope(&RenderAPIThreadChecker, "ovrHmd_ConfigureRendering");
// null -> shut down.
if (!apiConfig)
{
if (pRenderer)
pRenderer.Clear();
RenderingConfigured = false;
return true;
}
if (pRenderer &&
(apiConfig->Header.API != pRenderer->GetRenderAPI()))
{
// Shutdown old renderer.
if (pRenderer)
pRenderer.Clear();
}
// Step 1: do basic setup configuration
RenderState.setupRenderDesc(eyeRenderDescOut, eyeFovIn);
RenderState.EnabledHmdCaps = EnabledHmdCaps; // This is a copy... Any cleaner way?
RenderState.DistortionCaps = distortionCaps;
TimeManager.ResetFrameTiming(0,
(EnabledHmdCaps & ovrHmdCap_DynamicPrediction) ? true : false,
true);
LastFrameTimeSeconds = 0.0f;
// Set RenderingConfigured early to avoid ASSERTs in renderer initialization.
RenderingConfigured = true;
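// Step 2: create the back-end distortion renderer (if needed) and initialize it.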
if (!pRenderer)
{
pRenderer = *DistortionRenderer::APICreateRegistry
[apiConfig->Header.API](this, TimeManager, RenderState);
}
if (!pRenderer ||
!pRenderer->Initialize(apiConfig, distortionCaps))
{
RenderingConfigured = false;
return false;
}
return true;
}
ovrPosef HMDState::BeginEyeRender(ovrEyeType eye)
{
// Debug checks.
checkBeginFrameScope("ovrHmd_BeginEyeRender");
ThreadChecker::Scope checkScope(&RenderAPIThreadChecker, "ovrHmd_BeginEyeRender");
// Unknown eyeId provided in ovrHmd_BeginEyeRender
OVR_ASSERT_LOG(eye == ovrEye_Left || eye == ovrEye_Right,
("ovrHmd_BeginEyeRender eyeId out of range."));
OVR_ASSERT_LOG(EyeRenderActive[eye] == false,
("Multiple calls to ovrHmd_BeginEyeRender for the same eye."));
EyeRenderActive[eye] = true;
// Only process latency tester for drawing the left eye (assumes left eye is drawn first)
if (pRenderer && eye == 0)
{
LatencyTestActive = ProcessLatencyTest(LatencyTestDrawColor);
}
return ovrHmd_GetEyePose(this, eye);
}
void HMDState::EndEyeRender(ovrEyeType eye, ovrPosef renderPose, ovrTexture* eyeTexture)
{
// Debug checks.
checkBeginFrameScope("ovrHmd_EndEyeRender");
ThreadChecker::Scope checkScope(&RenderAPIThreadChecker, "ovrHmd_EndEyeRender");
if (!EyeRenderActive[eye])
{
OVR_ASSERT_LOG(false,
("ovrHmd_EndEyeRender called without ovrHmd_BeginEyeRender."));
return;
}
RenderState.EyeRenderPoses[eye] = renderPose;
if (pRenderer)
pRenderer->SubmitEye(eye, eyeTexture);
EyeRenderActive[eye] = false;
}
}} // namespace OVR::CAPI

View File

@ -0,0 +1,347 @@
/************************************************************************************
Filename : CAPI_HMDState.h
Content : State associated with a single HMD
Created : January 24, 2014
Authors : Michael Antonov
Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved.
Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License");
you may not use the Oculus VR Rift SDK except in compliance with the License,
which is provided at the time of installation or download, or which
otherwise accompanies this software in either electronic or hard copy form.
You may obtain a copy of the License at
http://www.oculusvr.com/licenses/LICENSE-3.1
Unless required by applicable law or agreed to in writing, the Oculus VR SDK
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
************************************************************************************/
#ifndef OVR_CAPI_HMDState_h
#define OVR_CAPI_HMDState_h
#include "../Kernel/OVR_Math.h"
#include "../Kernel/OVR_List.h"
#include "../Kernel/OVR_Log.h"
#include "../OVR_CAPI.h"
#include "../OVR_SensorFusion.h"
#include "../Util/Util_LatencyTest.h"
#include "../Util/Util_LatencyTest2.h"
#include "CAPI_FrameTimeManager.h"
#include "CAPI_HMDRenderState.h"
#include "CAPI_DistortionRenderer.h"
// Define OVR_CAPI_VISIONSUPPORT to compile in vision support
#ifdef OVR_CAPI_VISIONSUPPORT
#define OVR_CAPI_VISION_CODE(c) c
#include "../Vision/Vision_PoseTracker.h"
#else
#define OVR_CAPI_VISION_CODE(c)
#endif
struct ovrHmdStruct { };
namespace OVR { namespace CAPI {
using namespace OVR::Util::Render;
//-------------------------------------------------------------------------------------
// ***** ThreadChecker
// This helper class is used to verify that the API is used according to supported
// thread safety constraints (is not re-entrant for this and related functions).
class ThreadChecker
{
public:
#ifndef OVR_BUILD_DEBUG
// In release build, thread checks are disabled.
ThreadChecker() { }
void Begin(const char* functionName) { OVR_UNUSED1(functionName); }
void End() { }
// Add thread-re-entrancy check for function scope
struct Scope
{
Scope(ThreadChecker*, const char *) { }
~Scope() { }
};
#else // OVR_BUILD_DEBUG
ThreadChecker() : pFunctionName(0), FirstThread(0)
{ }
void Begin(const char* functionName)
{
if (!pFunctionName)
{
pFunctionName = functionName;
FirstThread = GetCurrentThreadId();
}
else
{
// pFunctionName may be non-null here if the function is called internally on the same thread.
OVR_ASSERT_LOG((FirstThread == GetCurrentThreadId()),
("%s (threadId=%p) called at the same time as %s (threadId=%p)\n",
functionName, GetCurrentThreadId(), pFunctionName, FirstThread) );
}
}
void End()
{
pFunctionName = 0;
FirstThread = 0;
}
// Add thread-re-entrancy check for function scope.
struct Scope
{
Scope(ThreadChecker* threadChecker, const char *functionName) : pChecker(threadChecker)
{ pChecker->Begin(functionName); }
~Scope()
{ pChecker->End(); }
private:
ThreadChecker* pChecker;
};
private:
// If not 0, contains the name of the function that first entered the scope.
const char * pFunctionName;
ThreadId FirstThread;
#endif // OVR_BUILD_DEBUG
};
//-------------------------------------------------------------------------------------
// ***** HMDState
// Describes a single HMD.
class HMDState : public ListNode<HMDState>,
public ovrHmdStruct, public NewOverrideBase
{
public:
HMDState(HMDDevice* device);
HMDState(ovrHmdType hmdType);
virtual ~HMDState();
// *** Sensor Setup
bool StartSensor(unsigned supportedCaps, unsigned requiredCaps);
void StopSensor();
void ResetSensor();
ovrSensorState PredictedSensorState(double absTime);
bool GetSensorDesc(ovrSensorDesc* descOut);
// Changes HMD Caps.
// Capability bits that are not directly or logically tied to one system (such as sensor)
// are grouped here. ovrHmdCap_VSync, for example, affects rendering and timing.
void SetEnabledHmdCaps(unsigned caps);
bool ProcessLatencyTest(unsigned char rgbColorOut[3]);
void ProcessLatencyTest2(unsigned char rgbColorOut[3], double startTime);
// *** Rendering Setup
bool ConfigureRendering(ovrEyeRenderDesc eyeRenderDescOut[2],
const ovrFovPort eyeFovIn[2],
const ovrRenderAPIConfig* apiConfig,
unsigned distortionCaps);
ovrPosef BeginEyeRender(ovrEyeType eye);
void EndEyeRender(ovrEyeType eye, ovrPosef renderPose, ovrTexture* eyeTexture);
const char* GetLastError()
{
const char* p = pLastError;
pLastError = 0;
return p;
}
void NotifyAddDevice(DeviceType deviceType)
{
if (deviceType == Device_Sensor)
AddSensorCount++;
else if (deviceType == Device_LatencyTester)
{
AddLatencyTestCount++;
AddLatencyTestDisplayCount++;
}
}
bool checkCreateSensor();
void applyProfileToSensorFusion();
// Inlines so that they can be easily compiled out.
// Does debug ASSERT checks for functions that require BeginFrame.
// Also verifies that we are on the right thread.
void checkBeginFrameScope(const char* functionName)
{
OVR_UNUSED1(functionName); // for Release build.
OVR_ASSERT_LOG(BeginFrameCalled == true,
("%s called outside ovrHmd_BeginFrame.", functionName));
OVR_ASSERT_LOG(BeginFrameThreadId == OVR::GetCurrentThreadId(),
("%s called on a different thread than ovrHmd_BeginFrame.", functionName));
}
void checkRenderingConfigured(const char* functionName)
{
OVR_UNUSED1(functionName); // for Release build.
OVR_ASSERT_LOG(RenderingConfigured == true,
("%s called without ovrHmd_ConfigureRendering.", functionName));
}
void checkBeginFrameTimingScope(const char* functionName)
{
OVR_UNUSED1(functionName); // for Release build.
OVR_ASSERT_LOG(BeginFrameTimingCalled == true,
("%s called outside ovrHmd_BeginFrameTiming.", functionName));
}
HMDState* getThis() { return this; }
void updateLowPersistenceMode(bool lowPersistence) const;
void updateLatencyTestForHmd(bool latencyTesting);
void updateDK2FeaturesTiedToSensor(bool sensorCreatedJustNow);
// Get properties by name.
float getFloatValue(const char* propertyName, float defaultVal);
bool setFloatValue(const char* propertyName, float value);
unsigned getFloatArray(const char* propertyName, float values[], unsigned arraySize);
bool setFloatArray(const char* propertyName, float values[], unsigned arraySize);
const char* getString(const char* propertyName, const char* defaultVal);
public:
// Wrapper to support 'const'
struct HMDInfoWrapper
{
HMDInfoWrapper(ovrHmdType hmdType)
{
HmdTypeEnum t = HmdType_None;
if (hmdType == ovrHmd_DK1)
t = HmdType_DK1;
else if (hmdType == ovrHmd_CrystalCoveProto)
t = HmdType_CrystalCoveProto;
else if (hmdType == ovrHmd_DK2)
t = HmdType_DK2;
h = CreateDebugHMDInfo(t);
}
HMDInfoWrapper(HMDDevice* device) { if (device) device->GetDeviceInfo(&h); }
OVR::HMDInfo h;
};
// Note: pHMD can be null if we are representing a virtualized debug HMD.
Ptr<HMDDevice> pHMD;
// HMDInfo shouldn't change, as its string pointers are passed out.
const HMDInfoWrapper HMDInfoW;
const OVR::HMDInfo& HMDInfo;
const char* pLastError;
// Caps enabled for the HMD.
unsigned EnabledHmdCaps;
// These are the flags actually applied to the Sensor device,
// used to track whether SetDisplayReport calls are necessary.
unsigned HmdCapsAppliedToSensor;
// *** Sensor
// Lock used to support thread-safe lifetime access to sensor.
Lock DevicesLock;
// Atomic integer used as a flag that we should check the sensor device.
AtomicInt<int> AddSensorCount;
// All sensor variables may be modified/used only while holding DevicesLock, with the
// exception that {SensorStarted, SensorCreated} can be read outside the lock to see
// if a device creation check is necessary.
// Whether we called StartSensor() and requested sensor caps.
volatile bool SensorStarted;
volatile bool SensorCreated;
// pSensor may still be null or non-running after start if it wasn't yet available
Ptr<SensorDevice> pSensor; // Head
unsigned SensorCaps;
// SensorFusion state may be accessible without a lock.
SensorFusion SFusion;
// Vision pose tracker is currently new-allocated
OVR_CAPI_VISION_CODE(
Vision::PoseTracker* pPoseTracker;
)
// Latency tester
Ptr<LatencyTestDevice> pLatencyTester;
Util::LatencyTest LatencyUtil;
AtomicInt<int> AddLatencyTestCount;
bool LatencyTestActive;
unsigned char LatencyTestDrawColor[3];
// Using latency tester as debug display
Ptr<LatencyTestDevice> pLatencyTesterDisplay;
AtomicInt<int> AddLatencyTestDisplayCount;
Util::LatencyTest2 LatencyUtil2;
bool LatencyTest2Active;
unsigned char LatencyTest2DrawColor[3];
//bool ReadbackColor;
// Rendering part
FrameTimeManager TimeManager;
HMDRenderState RenderState;
Ptr<DistortionRenderer> pRenderer;
// Last timing value reported by BeginFrame.
double LastFrameTimeSeconds;
// Last timing value reported by GetFrameTime. These are separate since the intended
// use is from different threads. TBD: Move to FrameTimeManager? Make atomic?
double LastGetFrameTimeSeconds;
// Last cached value returned by ovrHmd_GetString/ovrHmd_GetStringArray.
char LastGetStringValue[256];
// Debug flag set after ovrHmd_ConfigureRendering succeeds.
bool RenderingConfigured;
// Set after BeginFrame succeeds, and its corresponding thread id for debug checks.
bool BeginFrameCalled;
ThreadId BeginFrameThreadId;
// Graphics functions are not re-entrant from other threads.
ThreadChecker RenderAPIThreadChecker;
// Set after ovrHmd_BeginFrameTiming; checked by checkBeginFrameTimingScope.
bool BeginFrameTimingCalled;
// Flags set when we've called BeginEyeRender on a given eye.
bool EyeRenderActive[2];
};
}} // namespace OVR::CAPI
#endif // OVR_CAPI_HMDState_h

View File

@ -0,0 +1,784 @@
/************************************************************************************
Filename : CAPI_GL_DistortionRenderer.cpp
Content : Distortion renderer implementation for GL
Created : November 11, 2013
Authors : David Borel, Lee Cooper
Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved.
Use of this software is subject to the terms of the Oculus Inc license
agreement provided at the time of installation or download, or which
otherwise accompanies this software in either electronic or hard copy form.
************************************************************************************/
#include "CAPI_GL_DistortionRenderer.h"
#include "CAPI_GL_DistortionShaders.h"
#include "../../OVR_CAPI_GL.h"
namespace OVR { namespace CAPI { namespace GL {
// Distortion shader lookup.
// Bit 0: Chroma Correction
// Bit 1: Timewarp (vertex shaders only)
enum {
DistortionVertexShaderBitMask = 3,
DistortionVertexShaderCount = DistortionVertexShaderBitMask + 1,
DistortionPixelShaderBitMask = 1,
DistortionPixelShaderCount = DistortionPixelShaderBitMask + 1
};
struct ShaderInfo
{
const char* ShaderData;
size_t ShaderSize;
const ShaderBase::Uniform* ReflectionData;
size_t ReflectionSize;
};
// To add a new distortion shader, use these macros (with or without reflection data).
#define SI_NOREFL(shader) { shader, sizeof(shader), NULL, 0 }
#define SI_REFL__(shader) { shader, sizeof(shader), shader ## _refl, sizeof( shader ## _refl )/sizeof(*(shader ## _refl)) }
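// For example, SI_REFL__(Distortion_vs) expands to
// { Distortion_vs, sizeof(Distortion_vs), Distortion_vs_refl, <element count of Distortion_vs_refl> }.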
static ShaderInfo DistortionVertexShaderLookup[DistortionVertexShaderCount] =
{
SI_REFL__(Distortion_vs),
SI_REFL__(DistortionChroma_vs),
SI_REFL__(DistortionTimewarp_vs),
SI_REFL__(DistortionTimewarpChroma_vs)
};
static ShaderInfo DistortionPixelShaderLookup[DistortionPixelShaderCount] =
{
SI_NOREFL(Distortion_fs),
SI_NOREFL(DistortionChroma_fs)
};
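// The distortion cap bits are used directly as indices into the tables above,
// so their values are pinned by the compile-time asserts below.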
void DistortionShaderBitIndexCheck()
{
OVR_COMPILER_ASSERT(ovrDistortionCap_Chromatic == 1);
OVR_COMPILER_ASSERT(ovrDistortionCap_TimeWarp == 2);
}
struct DistortionVertex
{
Vector2f Pos;
Vector2f TexR;
Vector2f TexG;
Vector2f TexB;
Color Col;
};
// Position-only vertex type used for the latency-test quad geometry.
struct LatencyVertex
{
Vector3f Pos;
LatencyVertex (const Vector3f& p) : Pos(p) {}
};
//----------------------------------------------------------------------------
// ***** GL::DistortionRenderer
DistortionRenderer::DistortionRenderer(ovrHmd hmd, FrameTimeManager& timeManager,
const HMDRenderState& renderState)
: CAPI::DistortionRenderer(ovrRenderAPI_OpenGL, hmd, timeManager, renderState)
, LatencyVAO(0)
{
DistortionMeshVAOs[0] = 0;
DistortionMeshVAOs[1] = 0;
}
DistortionRenderer::~DistortionRenderer()
{
destroy();
}
// static
CAPI::DistortionRenderer* DistortionRenderer::Create(ovrHmd hmd,
FrameTimeManager& timeManager,
const HMDRenderState& renderState)
{
#if !defined(OVR_OS_MAC)
InitGLExtensions();
#endif
return new DistortionRenderer(hmd, timeManager, renderState);
}
bool DistortionRenderer::Initialize(const ovrRenderAPIConfig* apiConfig,
unsigned distortionCaps)
{
GfxState = *new GraphicsState();
const ovrGLConfig* config = (const ovrGLConfig*)apiConfig;
if (!config)
{
// Cleanup
pEyeTextures[0].Clear();
pEyeTextures[1].Clear();
memset(&RParams, 0, sizeof(RParams));
return true;
}
RParams.Multisample = config->OGL.Header.Multisample;
RParams.RTSize = config->OGL.Header.RTSize;
#if defined(OVR_OS_WIN32)
RParams.Window = (config->OGL.Window) ? config->OGL.Window : GetActiveWindow();
#elif defined(OVR_OS_LINUX)
RParams.Disp = (config->OGL.Disp) ? config->OGL.Disp : XOpenDisplay(NULL);
RParams.Win = config->OGL.Win;
if (!RParams.Win)
{
int unused;
XGetInputFocus(RParams.Disp, &RParams.Win, &unused);
}
#endif
DistortionCaps = distortionCaps;
//DistortionWarper.SetVsync((hmdCaps & ovrHmdCap_NoVSync) ? false : true);
pEyeTextures[0] = *new Texture(&RParams, 0, 0);
pEyeTextures[1] = *new Texture(&RParams, 0, 0);
initBuffersAndShaders();
return true;
}
void DistortionRenderer::SubmitEye(int eyeId, ovrTexture* eyeTexture)
{
// Not much happens here; the texture is just recorded so EndFrame can render with it.
const ovrGLTexture* tex = (const ovrGLTexture*)eyeTexture;
if (tex)
{
// Write in values (after the null check, so a null texture is never dereferenced).
eachEye[eyeId].texture = tex->OGL.TexId;
// It's only at this point that we discover the texture's viewport,
// since users are presumably allowed to adjust the resolution at runtime.
eachEye[eyeId].TextureSize = tex->OGL.Header.TextureSize;
eachEye[eyeId].RenderViewport = tex->OGL.Header.RenderViewport;
const ovrEyeRenderDesc& erd = RState.EyeRenderDesc[eyeId];
ovrHmd_GetRenderScaleAndOffset( erd.Fov,
eachEye[eyeId].TextureSize, eachEye[eyeId].RenderViewport,
eachEye[eyeId].UVScaleOffset );
pEyeTextures[eyeId]->UpdatePlaceholderTexture(tex->OGL.TexId,
tex->OGL.Header.TextureSize);
}
}
void DistortionRenderer::EndFrame(bool swapBuffers,
unsigned char* latencyTesterDrawColor, unsigned char* latencyTester2DrawColor)
{
if (!TimeManager.NeedDistortionTimeMeasurement())
{
if (RState.DistortionCaps & ovrDistortionCap_TimeWarp)
{
// Flush the GPU and wait until the timewarp point before rendering distortion.
FlushGpuAndWaitTillTime(TimeManager.GetFrameTiming().TimewarpPointTime);
}
renderDistortion(pEyeTextures[0], pEyeTextures[1]);
}
else
{
// If needed, measure distortion time so that TimeManager can better estimate
// latency-reducing time-warp wait timing.
WaitUntilGpuIdle();
double distortionStartTime = ovr_GetTimeInSeconds();
renderDistortion(pEyeTextures[0], pEyeTextures[1]);
WaitUntilGpuIdle();
TimeManager.AddDistortionTimeMeasurement(ovr_GetTimeInSeconds() - distortionStartTime);
}
if(latencyTesterDrawColor)
{
renderLatencyQuad(latencyTesterDrawColor);
}
else if(latencyTester2DrawColor)
{
renderLatencyPixel(latencyTester2DrawColor);
}
if (swapBuffers)
{
bool useVsync = ((RState.EnabledHmdCaps & ovrHmdCap_NoVSync) == 0);
int swapInterval = (useVsync) ? 1 : 0;
#if defined(OVR_OS_WIN32)
if (wglGetSwapIntervalEXT() != swapInterval)
wglSwapIntervalEXT(swapInterval);
HDC dc = GetDC(RParams.Window);
BOOL success = SwapBuffers(dc);
ReleaseDC(RParams.Window, dc);
OVR_ASSERT(success);
OVR_UNUSED(success);
#elif defined(OVR_OS_MAC)
CGLContextObj context = CGLGetCurrentContext();
GLint currentSwapInterval = 0;
CGLGetParameter(context, kCGLCPSwapInterval, &currentSwapInterval);
if (currentSwapInterval != swapInterval)
CGLSetParameter(context, kCGLCPSwapInterval, &swapInterval);
CGLFlushDrawable(context);
#elif defined(OVR_OS_LINUX)
static const char* extensions = glXQueryExtensionsString(RParams.Disp, 0);
static bool supportsVSync = (extensions != NULL && strstr(extensions, "GLX_EXT_swap_control"));
if (supportsVSync)
{
GLuint currentSwapInterval = 0;
glXQueryDrawable(RParams.Disp, RParams.Win, GLX_SWAP_INTERVAL_EXT, &currentSwapInterval);
if (currentSwapInterval != swapInterval)
glXSwapIntervalEXT(RParams.Disp, RParams.Win, swapInterval);
}
glXSwapBuffers(RParams.Disp, RParams.Win);
#endif
}
}
void DistortionRenderer::WaitUntilGpuIdle()
{
glFlush();
glFinish();
}
double DistortionRenderer::FlushGpuAndWaitTillTime(double absTime)
{
double initialTime = ovr_GetTimeInSeconds();
if (initialTime >= absTime)
return 0.0;
glFlush();
glFinish();
double newTime = initialTime;
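// Busy-wait until absTime; the volatile counter below keeps the compiler from
// optimizing the delay loop away between time queries.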
volatile int i;
while (newTime < absTime)
{
for (int j = 0; j < 50; j++)
i = 0;
newTime = ovr_GetTimeInSeconds();
}
// How long we waited
return newTime - initialTime;
}
DistortionRenderer::GraphicsState::GraphicsState()
{
const char* glVersionString = (const char*)glGetString(GL_VERSION);
OVR_DEBUG_LOG(("GL_VERSION STRING: %s", (const char*)glVersionString));
char prefix[64];
bool foundVersion = false;
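// Parse the version by prefix-matching "major.minor" from 1.0 through 2.9
// against the GL_VERSION string; glGetIntegerv(GL_MAJOR_VERSION) is itself
// only available on GL 3.0+, so it is used as a fallback below.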
for (int i = 10; i < 30; ++i)
{
int major = i / 10;
int minor = i % 10;
OVR_sprintf(prefix, 64, "%d.%d", major, minor);
if (strstr(glVersionString, prefix) == glVersionString)
{
GlMajorVersion = major;
GlMinorVersion = minor;
foundVersion = true;
break;
}
}
if (!foundVersion)
{
glGetIntegerv(GL_MAJOR_VERSION, &GlMajorVersion);
glGetIntegerv(GL_MINOR_VERSION, &GlMinorVersion);
}
OVR_ASSERT(GlMajorVersion >= 2);
if (GlMajorVersion >= 3)
{
SupportsVao = true;
}
else
{
const char* extensions = (const char*)glGetString(GL_EXTENSIONS);
SupportsVao = (strstr(extensions, "GL_ARB_vertex_array_object") != NULL);
}
}
void DistortionRenderer::GraphicsState::ApplyBool(GLenum Name, GLint Value)
{
if (Value != 0)
glEnable(Name);
else
glDisable(Name);
}
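// Save/Restore bracket the distortion pass so the SDK can change GL state
// freely for its own rendering and hand the context back to the application
// exactly as it found it.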
void DistortionRenderer::GraphicsState::Save()
{
glGetIntegerv(GL_VIEWPORT, Viewport);
glGetFloatv(GL_COLOR_CLEAR_VALUE, ClearColor);
glGetIntegerv(GL_DEPTH_TEST, &DepthTest);
glGetIntegerv(GL_CULL_FACE, &CullFace);
glGetIntegerv(GL_CURRENT_PROGRAM, &Program);
glGetIntegerv(GL_ACTIVE_TEXTURE, &ActiveTexture);
glGetIntegerv(GL_TEXTURE_BINDING_2D, &TextureBinding);
glGetIntegerv(GL_VERTEX_ARRAY_BINDING, &VertexArray);
glGetIntegerv(GL_FRAMEBUFFER_BINDING, &FrameBufferBinding);
glGetIntegerv(GL_BLEND, &Blend);
glGetIntegerv(GL_COLOR_WRITEMASK, ColorWritemask);
glGetIntegerv(GL_DITHER, &Dither);
glGetIntegerv(GL_RASTERIZER_DISCARD, &RasterizerDiscard);
if (GlMajorVersion > 3 || (GlMajorVersion == 3 && GlMinorVersion >= 2)) // GL_SAMPLE_MASK requires GL 3.2+
glGetIntegerv(GL_SAMPLE_MASK, &SampleMask);
glGetIntegerv(GL_SCISSOR_TEST, &ScissorTest);
IsValid = true;
}
void DistortionRenderer::GraphicsState::Restore()
{
// Don't allow restore-before-save.
if (!IsValid)
return;
glViewport(Viewport[0], Viewport[1], Viewport[2], Viewport[3]);
glClearColor(ClearColor[0], ClearColor[1], ClearColor[2], ClearColor[3]);
ApplyBool(GL_DEPTH_TEST, DepthTest);
ApplyBool(GL_CULL_FACE, CullFace);
glUseProgram(Program);
glActiveTexture(ActiveTexture);
glBindTexture(GL_TEXTURE_2D, TextureBinding);
if (SupportsVao)
glBindVertexArray(VertexArray);
glBindFramebuffer(GL_FRAMEBUFFER, FrameBufferBinding);
ApplyBool(GL_BLEND, Blend);
glColorMask((GLboolean)ColorWritemask[0], (GLboolean)ColorWritemask[1], (GLboolean)ColorWritemask[2], (GLboolean)ColorWritemask[3]);
ApplyBool(GL_DITHER, Dither);
ApplyBool(GL_RASTERIZER_DISCARD, RasterizerDiscard);
if (GlMajorVersion > 3 || (GlMajorVersion == 3 && GlMinorVersion >= 2)) // GL_SAMPLE_MASK requires GL 3.2+
ApplyBool(GL_SAMPLE_MASK, SampleMask);
ApplyBool(GL_SCISSOR_TEST, ScissorTest);
}
void DistortionRenderer::initBuffersAndShaders()
{
for ( int eyeNum = 0; eyeNum < 2; eyeNum++ )
{
// Allocate & generate distortion mesh vertices.
ovrDistortionMesh meshData;
// double startT = ovr_GetTimeInSeconds();
if (!ovrHmd_CreateDistortionMesh( HMD,
RState.EyeRenderDesc[eyeNum].Eye,
RState.EyeRenderDesc[eyeNum].Fov,
RState.DistortionCaps,
&meshData) )
{
OVR_ASSERT(false);
continue;
}
// Now parse the vertex data and create a render ready vertex buffer from it
DistortionVertex * pVBVerts = (DistortionVertex*)OVR_ALLOC ( sizeof(DistortionVertex) * meshData.VertexCount );
DistortionVertex * pCurVBVert = pVBVerts;
ovrDistortionVertex* pCurOvrVert = meshData.pVertexData;
for ( unsigned vertNum = 0; vertNum < meshData.VertexCount; vertNum++ )
{
pCurVBVert->Pos.x = pCurOvrVert->Pos.x;
pCurVBVert->Pos.y = pCurOvrVert->Pos.y;
pCurVBVert->TexR = (*(Vector2f*)&pCurOvrVert->TexR);
pCurVBVert->TexG = (*(Vector2f*)&pCurOvrVert->TexG);
pCurVBVert->TexB = (*(Vector2f*)&pCurOvrVert->TexB);
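// The vignette fade factor is replicated into R,G,B and the timewarp
// interpolation factor goes into A; the timewarp shaders read Color.a as the
// lerp weight between EyeRotationStart and EyeRotationEnd.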
// Convert [0.0f,1.0f] to [0,255]
pCurVBVert->Col.R = (OVR::UByte)( pCurOvrVert->VignetteFactor * 255.99f );
pCurVBVert->Col.G = pCurVBVert->Col.R;
pCurVBVert->Col.B = pCurVBVert->Col.R;
pCurVBVert->Col.A = (OVR::UByte)( pCurOvrVert->TimeWarpFactor * 255.99f );
pCurOvrVert++;
pCurVBVert++;
}
DistortionMeshVBs[eyeNum] = *new Buffer(&RParams);
DistortionMeshVBs[eyeNum]->Data ( Buffer_Vertex | Buffer_ReadOnly, pVBVerts, sizeof(DistortionVertex) * meshData.VertexCount );
DistortionMeshIBs[eyeNum] = *new Buffer(&RParams);
DistortionMeshIBs[eyeNum]->Data ( Buffer_Index | Buffer_ReadOnly, meshData.pIndexData, ( sizeof(SInt16) * meshData.IndexCount ) );
OVR_FREE ( pVBVerts );
ovrHmd_DestroyDistortionMesh( &meshData );
}
initShaders();
}
void DistortionRenderer::renderDistortion(Texture* leftEyeTexture, Texture* rightEyeTexture)
{
GraphicsState* glState = (GraphicsState*)GfxState.GetPtr();
glBindFramebuffer(GL_FRAMEBUFFER, 0);
setViewport( Recti(0,0, RParams.RTSize.w, RParams.RTSize.h) );
glDisable(GL_CULL_FACE);
glDisable(GL_DEPTH_TEST);
glDisable(GL_BLEND);
glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_FALSE);
glDisable(GL_DITHER);
glDisable(GL_RASTERIZER_DISCARD);
if (glState->GlMajorVersion > 3 || (glState->GlMajorVersion == 3 && glState->GlMinorVersion >= 2)) // GL_SAMPLE_MASK requires GL 3.2+
glDisable(GL_SAMPLE_MASK);
glDisable(GL_SCISSOR_TEST);
glClearColor(
RState.ClearColor[0],
RState.ClearColor[1],
RState.ClearColor[2],
RState.ClearColor[3] );
glClear(GL_COLOR_BUFFER_BIT);
for (int eyeNum = 0; eyeNum < 2; eyeNum++)
{
ShaderFill distortionShaderFill(DistortionShader);
distortionShaderFill.SetTexture(0, eyeNum == 0 ? leftEyeTexture : rightEyeTexture);
DistortionShader->SetUniform2f("EyeToSourceUVScale", eachEye[eyeNum].UVScaleOffset[0].x, eachEye[eyeNum].UVScaleOffset[0].y);
DistortionShader->SetUniform2f("EyeToSourceUVOffset", eachEye[eyeNum].UVScaleOffset[1].x, eachEye[eyeNum].UVScaleOffset[1].y);
if (DistortionCaps & ovrDistortionCap_TimeWarp)
{
ovrMatrix4f timeWarpMatrices[2];
ovrHmd_GetEyeTimewarpMatrices(HMD, (ovrEyeType)eyeNum,
RState.EyeRenderPoses[eyeNum], timeWarpMatrices);
// Transposed to match GLSL's column-major matrix layout.
DistortionShader->SetUniform4x4f("EyeRotationStart", Matrix4f(timeWarpMatrices[0]).Transposed());
DistortionShader->SetUniform4x4f("EyeRotationEnd", Matrix4f(timeWarpMatrices[1]).Transposed());
}
// The draw call is identical with or without timewarp.
renderPrimitives(&distortionShaderFill, DistortionMeshVBs[eyeNum], DistortionMeshIBs[eyeNum],
0, (int)DistortionMeshIBs[eyeNum]->GetSize()/2, Prim_Triangles, &DistortionMeshVAOs[eyeNum], true);
}
}
void DistortionRenderer::createDrawQuad()
{
const int numQuadVerts = 4;
LatencyTesterQuadVB = *new Buffer(&RParams);
if(!LatencyTesterQuadVB)
{
return;
}
LatencyTesterQuadVB->Data(Buffer_Vertex, NULL, numQuadVerts * sizeof(LatencyVertex));
LatencyVertex* vertices = (LatencyVertex*)LatencyTesterQuadVB->Map(0, numQuadVerts * sizeof(LatencyVertex), Map_Discard);
if(!vertices)
{
OVR_ASSERT(false); // failed to lock vertex buffer
return;
}
const float left = -1.0f;
const float top = -1.0f;
const float right = 1.0f;
const float bottom = 1.0f;
vertices[0] = LatencyVertex(Vector3f(left, top, 0.0f));
vertices[1] = LatencyVertex(Vector3f(left, bottom, 0.0f));
vertices[2] = LatencyVertex(Vector3f(right, top, 0.0f));
vertices[3] = LatencyVertex(Vector3f(right, bottom, 0.0f));
LatencyTesterQuadVB->Unmap(vertices);
}
void DistortionRenderer::renderLatencyQuad(unsigned char* latencyTesterDrawColor)
{
const int numQuadVerts = 4;
if(!LatencyTesterQuadVB)
{
createDrawQuad();
}
ShaderFill quadFill(SimpleQuadShader);
//quadFill.SetInputLayout(SimpleQuadVertexIL);
setViewport(Recti(0,0, RParams.RTSize.w, RParams.RTSize.h));
SimpleQuadShader->SetUniform2f("Scale", 0.2f, 0.2f);
SimpleQuadShader->SetUniform4f("Color", (float)latencyTesterDrawColor[0] / 255.99f,
(float)latencyTesterDrawColor[0] / 255.99f,
(float)latencyTesterDrawColor[0] / 255.99f,
1.0f);
for(int eyeNum = 0; eyeNum < 2; eyeNum++)
{
SimpleQuadShader->SetUniform2f("PositionOffset", eyeNum == 0 ? -0.4f : 0.4f, 0.0f);
renderPrimitives(&quadFill, LatencyTesterQuadVB, NULL, 0, numQuadVerts, Prim_TriangleStrip, &LatencyVAO, false);
}
}
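// Draws a tiny quad (on the order of a pixel or two) in the corner of the
// display, which the DK2's built-in latency sensor can read back to measure
// end-to-end latency.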
void DistortionRenderer::renderLatencyPixel(unsigned char* latencyTesterPixelColor)
{
const int numQuadVerts = 4;
if(!LatencyTesterQuadVB)
{
createDrawQuad();
}
ShaderFill quadFill(SimpleQuadShader);
setViewport(Recti(0,0, RParams.RTSize.w, RParams.RTSize.h));
SimpleQuadShader->SetUniform4f("Color", (float)latencyTesterPixelColor[0] / 255.99f,
(float)latencyTesterPixelColor[0] / 255.99f,
(float)latencyTesterPixelColor[0] / 255.99f,
1.0f);
Vector2f scale(2.0f / RParams.RTSize.w, 2.0f / RParams.RTSize.h);
SimpleQuadShader->SetUniform2f("Scale", scale.x, scale.y);
SimpleQuadShader->SetUniform2f("PositionOffset", 1.0f, 1.0f);
renderPrimitives(&quadFill, LatencyTesterQuadVB, NULL, 0, numQuadVerts, Prim_TriangleStrip, &LatencyVAO, false);
}
void DistortionRenderer::renderPrimitives(
const ShaderFill* fill,
Buffer* vertices, Buffer* indices,
int offset, int count,
PrimitiveType rprim, GLuint* vao, bool isDistortionMesh)
{
GraphicsState* glState = (GraphicsState*)GfxState.GetPtr();
GLenum prim;
switch (rprim)
{
case Prim_Triangles:
prim = GL_TRIANGLES;
break;
case Prim_Lines:
prim = GL_LINES;
break;
case Prim_TriangleStrip:
prim = GL_TRIANGLE_STRIP;
break;
default:
OVR_ASSERT(false);
return;
}
fill->Set();
GLuint prog = fill->GetShaders()->Prog;
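// VAO caching: the first draw sets up the vertex attribute bindings and, when
// supported, captures them into *vao; subsequent draws just rebind the cached
// VAO and issue the draw call.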
if (vao != NULL)
{
if (*vao != 0)
{
glBindVertexArray(*vao);
if (isDistortionMesh)
glDrawElements(prim, count, GL_UNSIGNED_SHORT, NULL);
else
glDrawArrays(prim, 0, count);
}
else
{
if (glState->SupportsVao)
{
glGenVertexArrays(1, vao);
glBindVertexArray(*vao);
}
int attributeCount = (isDistortionMesh) ? 5 : 1;
int* locs = new int[attributeCount];
glBindBuffer(GL_ARRAY_BUFFER, ((Buffer*)vertices)->GLBuffer);
if (isDistortionMesh)
{
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ((Buffer*)indices)->GLBuffer);
locs[0] = glGetAttribLocation(prog, "Position");
locs[1] = glGetAttribLocation(prog, "Color");
locs[2] = glGetAttribLocation(prog, "TexCoord0");
locs[3] = glGetAttribLocation(prog, "TexCoord1");
locs[4] = glGetAttribLocation(prog, "TexCoord2");
glVertexAttribPointer(locs[0], 2, GL_FLOAT, false, sizeof(DistortionVertex), reinterpret_cast<char*>(offset)+offsetof(DistortionVertex, Pos));
glVertexAttribPointer(locs[1], 4, GL_UNSIGNED_BYTE, true, sizeof(DistortionVertex), reinterpret_cast<char*>(offset)+offsetof(DistortionVertex, Col));
glVertexAttribPointer(locs[2], 2, GL_FLOAT, false, sizeof(DistortionVertex), reinterpret_cast<char*>(offset)+offsetof(DistortionVertex, TexR));
glVertexAttribPointer(locs[3], 2, GL_FLOAT, false, sizeof(DistortionVertex), reinterpret_cast<char*>(offset)+offsetof(DistortionVertex, TexG));
glVertexAttribPointer(locs[4], 2, GL_FLOAT, false, sizeof(DistortionVertex), reinterpret_cast<char*>(offset)+offsetof(DistortionVertex, TexB));
}
else
{
locs[0] = glGetAttribLocation(prog, "Position");
glVertexAttribPointer(locs[0], 3, GL_FLOAT, false, sizeof(LatencyVertex), reinterpret_cast<char*>(offset)+offsetof(LatencyVertex, Pos));
}
for (int i = 0; i < attributeCount; ++i)
glEnableVertexAttribArray(locs[i]);
if (isDistortionMesh)
glDrawElements(prim, count, GL_UNSIGNED_SHORT, NULL);
else
glDrawArrays(prim, 0, count);
if (!glState->SupportsVao)
{
for (int i = 0; i < attributeCount; ++i)
glDisableVertexAttribArray(locs[i]);
}
delete[] locs;
}
}
}
void DistortionRenderer::setViewport(const Recti& vp)
{
glViewport(vp.x, vp.y, vp.w, vp.h);
}
void DistortionRenderer::initShaders()
{
GraphicsState* glState = (GraphicsState*)GfxState.GetPtr();
const char* shaderPrefix =
(glState->GlMajorVersion < 3 || (glState->GlMajorVersion == 3 && glState->GlMinorVersion < 2)) ?
glsl2Prefix : glsl3Prefix;
{
ShaderInfo vsInfo = DistortionVertexShaderLookup[DistortionVertexShaderBitMask & DistortionCaps];
size_t vsSize = strlen(shaderPrefix)+vsInfo.ShaderSize;
char* vsSource = new char[vsSize];
OVR_strcpy(vsSource, vsSize, shaderPrefix);
OVR_strcat(vsSource, vsSize, vsInfo.ShaderData);
Ptr<GL::VertexShader> vs = *new GL::VertexShader(
&RParams,
(void*)vsSource, vsSize,
vsInfo.ReflectionData, vsInfo.ReflectionSize);
DistortionShader = *new ShaderSet;
DistortionShader->SetShader(vs);
delete[](vsSource);
ShaderInfo psInfo = DistortionPixelShaderLookup[DistortionPixelShaderBitMask & DistortionCaps];
size_t psSize = strlen(shaderPrefix)+psInfo.ShaderSize;
char* psSource = new char[psSize];
OVR_strcpy(psSource, psSize, shaderPrefix);
OVR_strcat(psSource, psSize, psInfo.ShaderData);
Ptr<GL::FragmentShader> ps = *new GL::FragmentShader(
&RParams,
(void*)psSource, psSize,
psInfo.ReflectionData, psInfo.ReflectionSize);
DistortionShader->SetShader(ps);
delete[](psSource);
}
{
size_t vsSize = strlen(shaderPrefix)+sizeof(SimpleQuad_vs);
char* vsSource = new char[vsSize];
OVR_strcpy(vsSource, vsSize, shaderPrefix);
OVR_strcat(vsSource, vsSize, SimpleQuad_vs);
Ptr<GL::VertexShader> vs = *new GL::VertexShader(
&RParams,
(void*)vsSource, vsSize,
SimpleQuad_vs_refl, sizeof(SimpleQuad_vs_refl) / sizeof(SimpleQuad_vs_refl[0]));
SimpleQuadShader = *new ShaderSet;
SimpleQuadShader->SetShader(vs);
delete[](vsSource);
size_t psSize = strlen(shaderPrefix)+sizeof(SimpleQuad_fs);
char* psSource = new char[psSize];
OVR_strcpy(psSource, psSize, shaderPrefix);
OVR_strcat(psSource, psSize, SimpleQuad_fs);
Ptr<GL::FragmentShader> ps = *new GL::FragmentShader(
&RParams,
(void*)psSource, psSize,
SimpleQuad_fs_refl, sizeof(SimpleQuad_fs_refl) / sizeof(SimpleQuad_fs_refl[0]));
SimpleQuadShader->SetShader(ps);
delete[](psSource);
}
}
void DistortionRenderer::destroy()
{
GraphicsState* glState = (GraphicsState*)GfxState.GetPtr();
for(int eyeNum = 0; eyeNum < 2; eyeNum++)
{
if (glState->SupportsVao)
glDeleteVertexArrays(1, &DistortionMeshVAOs[eyeNum]);
DistortionMeshVAOs[eyeNum] = 0;
DistortionMeshVBs[eyeNum].Clear();
DistortionMeshIBs[eyeNum].Clear();
}
if (DistortionShader)
{
DistortionShader->UnsetShader(Shader_Vertex);
DistortionShader->UnsetShader(Shader_Pixel);
DistortionShader.Clear();
}
LatencyTesterQuadVB.Clear();
LatencyVAO = 0;
}
}}} // OVR::CAPI::GL

View File

@ -0,0 +1,178 @@
/************************************************************************************
Filename : CAPI_GL_DistortionRenderer.h
Content : Distortion renderer header for GL
Created : November 11, 2013
Authors : David Borel, Lee Cooper
Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved.
Use of this software is subject to the terms of the Oculus Inc license
agreement provided at the time of installation or download, or which
otherwise accompanies this software in either electronic or hard copy form.
************************************************************************************/
#ifndef OVR_CAPI_GL_DistortionRenderer_h
#define OVR_CAPI_GL_DistortionRenderer_h
#include "../CAPI_DistortionRenderer.h"
#include "../../Kernel/OVR_Log.h"
#include "CAPI_GL_Util.h"
namespace OVR { namespace CAPI { namespace GL {
// ***** GL::DistortionRenderer
// Implementation of DistortionRenderer for GL.
class DistortionRenderer : public CAPI::DistortionRenderer
{
public:
DistortionRenderer(ovrHmd hmd,
FrameTimeManager& timeManager,
const HMDRenderState& renderState);
~DistortionRenderer();
// Creation function for the device.
static CAPI::DistortionRenderer* Create(ovrHmd hmd,
FrameTimeManager& timeManager,
const HMDRenderState& renderState);
// ***** Public DistortionRenderer interface
virtual bool Initialize(const ovrRenderAPIConfig* apiConfig,
unsigned distortionCaps);
virtual void SubmitEye(int eyeId, ovrTexture* eyeTexture);
virtual void EndFrame(bool swapBuffers, unsigned char* latencyTesterDrawColor, unsigned char* latencyTester2DrawColor);
void WaitUntilGpuIdle();
// Similar to ovr_WaitTillTime but it also flushes GPU.
// Note, it exits when time expires, even if GPU is not in idle state yet.
double FlushGpuAndWaitTillTime(double absTime);
protected:
class GraphicsState : public CAPI::DistortionRenderer::GraphicsState
{
public:
GraphicsState();
virtual void Save();
virtual void Restore();
protected:
void ApplyBool(GLenum Name, GLint Value);
public:
GLint GlMajorVersion;
GLint GlMinorVersion;
bool SupportsVao;
GLint Viewport[4];
GLfloat ClearColor[4];
GLint DepthTest;
GLint CullFace;
GLint Program;
GLint ActiveTexture;
GLint TextureBinding;
GLint VertexArray;
GLint FrameBufferBinding;
GLint Blend;
GLint ColorWritemask[4];
GLint Dither;
GLint Fog;
GLint Lighting;
GLint RasterizerDiscard;
GLint RenderMode;
GLint SampleMask;
GLint ScissorTest;
GLfloat ZoomX;
GLfloat ZoomY;
};
// TBD: Should we be using one from RState instead?
unsigned DistortionCaps;
struct FOR_EACH_EYE
{
FOR_EACH_EYE() : TextureSize(0), RenderViewport(Sizei(0)) { }
#if 0
IDirect3DVertexBuffer9 * dxVerts;
IDirect3DIndexBuffer9 * dxIndices;
#endif
int numVerts;
int numIndices;
GLuint texture;
ovrVector2f UVScaleOffset[2];
Sizei TextureSize;
Recti RenderViewport;
} eachEye[2];
// GL context and utility variables.
RenderParams RParams;
// Helpers
void initBuffersAndShaders();
void initShaders();
void initFullscreenQuad();
void destroy();
void setViewport(const Recti& vp);
void renderDistortion(Texture* leftEyeTexture, Texture* rightEyeTexture);
void renderPrimitives(const ShaderFill* fill, Buffer* vertices, Buffer* indices,
int offset, int count,
PrimitiveType rprim, GLuint* vao, bool isDistortionMesh);
void createDrawQuad();
void renderLatencyQuad(unsigned char* latencyTesterDrawColor);
void renderLatencyPixel(unsigned char* latencyTesterPixelColor);
Ptr<Texture> pEyeTextures[2];
Ptr<Buffer> DistortionMeshVBs[2]; // one per-eye
Ptr<Buffer> DistortionMeshIBs[2]; // one per-eye
GLuint DistortionMeshVAOs[2]; // one per-eye
Ptr<ShaderSet> DistortionShader;
struct StandardUniformData
{
Matrix4f Proj;
Matrix4f View;
} StdUniforms;
GLuint LatencyVAO;
Ptr<Buffer> LatencyTesterQuadVB;
Ptr<ShaderSet> SimpleQuadShader;
Ptr<Texture> CurRenderTarget;
Array<Ptr<Texture> > DepthBuffers;
GLuint CurrentFbo;
GLint SavedViewport[4];
GLfloat SavedClearColor[4];
GLint SavedDepthTest;
GLint SavedCullFace;
GLint SavedProgram;
GLint SavedActiveTexture;
GLint SavedBoundTexture;
GLint SavedVertexArray;
GLint SavedBoundFrameBuffer;
};
}}} // OVR::CAPI::GL
#endif // OVR_CAPI_GL_DistortionRenderer_h

View File

@ -0,0 +1,326 @@
/************************************************************************************
Filename : CAPI_GL_DistortionShaders.h
Content : Distortion shader header for GL
Created : November 11, 2013
Authors : David Borel, Volga Aksoy
Copyright : Copyright 2013 Oculus VR, Inc. All Rights reserved.
Use of this software is subject to the terms of the Oculus Inc license
agreement provided at the time of installation or download, or which
otherwise accompanies this software in either electronic or hard copy form.
************************************************************************************/
#ifndef OVR_CAPI_GL_Shaders_h
#define OVR_CAPI_GL_Shaders_h
#include "CAPI_GL_Util.h"
namespace OVR { namespace CAPI { namespace GL {
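// The GLSL 1.10 (GL 2.x) and GLSL 1.50 (GL 3.2+) prefixes below define a small
// set of portability macros so the same shader bodies compile under both versions.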
static const char glsl2Prefix[] =
"#version 110\n"
"#extension GL_ARB_shader_texture_lod : enable\n"
"#define _FRAGCOLOR_DECLARATION\n"
"#define _VS_IN attribute\n"
"#define _VS_OUT varying\n"
"#define _FS_IN varying\n"
"#define _TEXTURELOD texture2DLod\n"
"#define _FRAGCOLOR gl_FragColor\n";
static const char glsl3Prefix[] =
"#version 150\n"
"#define _FRAGCOLOR_DECLARATION out vec4 FragColor;\n"
"#define _VS_IN in\n"
"#define _VS_OUT out\n"
"#define _FS_IN in\n"
"#define _TEXTURELOD textureLod\n"
"#define _FRAGCOLOR FragColor\n";
static const char SimpleQuad_vs[] =
"uniform vec2 PositionOffset;\n"
"uniform vec2 Scale;\n"
"_VS_IN vec3 Position;\n"
"void main()\n"
"{\n"
" gl_Position = vec4(Position.xy * Scale + PositionOffset, 0.5, 1.0);\n"
"}\n";
const OVR::CAPI::GL::ShaderBase::Uniform SimpleQuad_vs_refl[] =
{
{ "PositionOffset", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 0, 8 },
{ "Scale", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 8, 8 },
};
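// Each reflection entry is {name, type, byte offset, byte size}; e.g. a vec2
// spans 8 bytes, a vec4 16, and a mat4 64 (see DistortionTimewarpChroma_vs_refl below).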
static const char SimpleQuad_fs[] =
"uniform vec4 Color;\n"
"_FRAGCOLOR_DECLARATION\n"
"void main()\n"
"{\n"
" _FRAGCOLOR = Color;\n"
"}\n";
const OVR::CAPI::GL::ShaderBase::Uniform SimpleQuad_fs_refl[] =
{
{ "Color", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 0, 16 },
};
static const char Distortion_vs[] =
"uniform vec2 EyeToSourceUVScale;\n"
"uniform vec2 EyeToSourceUVOffset;\n"
"_VS_IN vec2 Position;\n"
"_VS_IN vec4 Color;\n"
"_VS_IN vec2 TexCoord0;\n"
"_VS_OUT vec4 oColor;\n"
"_VS_OUT vec2 oTexCoord0;\n"
"void main()\n"
"{\n"
" gl_Position.x = Position.x;\n"
" gl_Position.y = Position.y;\n"
" gl_Position.z = 0.5;\n"
" gl_Position.w = 1.0;\n"
// Vertex inputs are in TanEyeAngle space for the R,G,B channels (i.e. after chromatic aberration and distortion).
// Scale them into the correct [0-1],[0-1] UV lookup space (depending on eye)
" oTexCoord0 = TexCoord0 * EyeToSourceUVScale + EyeToSourceUVOffset;\n"
" oTexCoord0.y = 1.0 - oTexCoord0.y;\n"
" oColor = Color;\n" // Used for vignette fade.
"}\n";
const OVR::CAPI::GL::ShaderBase::Uniform Distortion_vs_refl[] =
{
{ "EyeToSourceUVScale", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 0, 8 },
{ "EyeToSourceUVOffset", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 8, 8 },
};
static const char Distortion_fs[] =
"uniform sampler2D Texture0;\n"
"_FS_IN vec4 oColor;\n"
"_FS_IN vec2 oTexCoord0;\n"
"_FRAGCOLOR_DECLARATION\n"
"void main()\n"
"{\n"
" _FRAGCOLOR = _TEXTURELOD(Texture0, oTexCoord0, 0.0);\n"
" _FRAGCOLOR.a = 1.0;\n"
"}\n";
static const char DistortionTimewarp_vs[] =
"uniform vec2 EyeToSourceUVScale;\n"
"uniform vec2 EyeToSourceUVOffset;\n"
"uniform mat4 EyeRotationStart;\n"
"uniform mat4 EyeRotationEnd;\n"
"_VS_IN vec2 Position;\n"
"_VS_IN vec4 Color;\n"
"_VS_IN vec2 TexCoord0;\n"
"_VS_OUT vec4 oColor;\n"
"_VS_OUT vec2 oTexCoord0;\n"
"void main()\n"
"{\n"
" gl_Position.x = Position.x;\n"
" gl_Position.y = Position.y;\n"
" gl_Position.z = 0.0;\n"
" gl_Position.w = 1.0;\n"
// Vertex inputs are in TanEyeAngle space for the R,G,B channels (i.e. after chromatic aberration and distortion).
// These are now "real world" vectors in direction (x,y,1) relative to the eye of the HMD.
" vec3 TanEyeAngle = vec3 ( TexCoord0.x, TexCoord0.y, 1.0 );\n"
// Accurate time warp lerp vs. faster
#if 1
// Apply the two 3x3 timewarp rotations to these vectors.
" vec3 TransformedStart = (EyeRotationStart * vec4(TanEyeAngle, 0)).xyz;\n"
" vec3 TransformedEnd = (EyeRotationEnd * vec4(TanEyeAngle, 0)).xyz;\n"
// And blend between them.
" vec3 Transformed = mix ( TransformedStart, TransformedEnd, Color.a );\n"
#else
" mat4 EyeRotation = mix ( EyeRotationStart, EyeRotationEnd, Color.a );\n"
" vec3 Transformed = (EyeRotation * vec4(TanEyeAngle, 0)).xyz;\n"
#endif
// Project them back onto the Z=1 plane of the rendered images.
" float RecipZ = 1.0 / Transformed.z;\n"
" vec2 Flattened = vec2 ( Transformed.x * RecipZ, Transformed.y * RecipZ );\n"
// These are now still in TanEyeAngle space.
// Scale them into the correct [0-1],[0-1] UV lookup space (depending on eye)
" vec2 SrcCoord = Flattened * EyeToSourceUVScale + EyeToSourceUVOffset;\n"
" oTexCoord0 = SrcCoord;\n"
" oTexCoord0.y = 1.0-oTexCoord0.y;\n"
" oColor = vec4(Color.r, Color.r, Color.r, Color.r);\n" // Used for vignette fade.
"}\n";
const OVR::CAPI::GL::ShaderBase::Uniform DistortionTimewarp_vs_refl[] =
{
{ "EyeToSourceUVScale", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 0, 8 },
{ "EyeToSourceUVOffset", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 8, 8 },
};
static const char DistortionChroma_vs[] =
"uniform vec2 EyeToSourceUVScale;\n"
"uniform vec2 EyeToSourceUVOffset;\n"
"_VS_IN vec2 Position;\n"
"_VS_IN vec4 Color;\n"
"_VS_IN vec2 TexCoord0;\n"
"_VS_IN vec2 TexCoord1;\n"
"_VS_IN vec2 TexCoord2;\n"
"_VS_OUT vec4 oColor;\n"
"_VS_OUT vec2 oTexCoord0;\n"
"_VS_OUT vec2 oTexCoord1;\n"
"_VS_OUT vec2 oTexCoord2;\n"
"void main()\n"
"{\n"
" gl_Position.x = Position.x;\n"
" gl_Position.y = Position.y;\n"
" gl_Position.z = 0.5;\n"
" gl_Position.w = 1.0;\n"
// Vertex inputs are in TanEyeAngle space for the R,G,B channels (i.e. after chromatic aberration and distortion).
// Scale them into the correct [0-1],[0-1] UV lookup space (depending on eye)
" oTexCoord0 = TexCoord0 * EyeToSourceUVScale + EyeToSourceUVOffset;\n"
" oTexCoord0.y = 1.0-oTexCoord0.y;\n"
" oTexCoord1 = TexCoord1 * EyeToSourceUVScale + EyeToSourceUVOffset;\n"
" oTexCoord1.y = 1.0-oTexCoord1.y;\n"
" oTexCoord2 = TexCoord2 * EyeToSourceUVScale + EyeToSourceUVOffset;\n"
" oTexCoord2.y = 1.0-oTexCoord2.y;\n"
" oColor = Color;\n" // Used for vignette fade.
"}\n";
const OVR::CAPI::GL::ShaderBase::Uniform DistortionChroma_vs_refl[] =
{
{ "EyeToSourceUVScale", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 0, 8 },
{ "EyeToSourceUVOffset", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 8, 8 },
};
static const char DistortionChroma_fs[] =
"uniform sampler2D Texture0;\n"
"_FS_IN vec4 oColor;\n"
"_FS_IN vec2 oTexCoord0;\n"
"_FS_IN vec2 oTexCoord1;\n"
"_FS_IN vec2 oTexCoord2;\n"
"_FRAGCOLOR_DECLARATION\n"
"void main()\n"
"{\n"
" float ResultR = _TEXTURELOD(Texture0, oTexCoord0, 0.0).r;\n"
" float ResultG = _TEXTURELOD(Texture0, oTexCoord1, 0.0).g;\n"
" float ResultB = _TEXTURELOD(Texture0, oTexCoord2, 0.0).b;\n"
" _FRAGCOLOR = vec4(ResultR * oColor.r, ResultG * oColor.g, ResultB * oColor.b, 1.0);\n"
"}\n";
static const char DistortionTimewarpChroma_vs[] =
"uniform vec2 EyeToSourceUVScale;\n"
"uniform vec2 EyeToSourceUVOffset;\n"
"uniform mat4 EyeRotationStart;\n"
"uniform mat4 EyeRotationEnd;\n"
"_VS_IN vec2 Position;\n"
"_VS_IN vec4 Color;\n"
"_VS_IN vec2 TexCoord0;\n"
"_VS_IN vec2 TexCoord1;\n"
"_VS_IN vec2 TexCoord2;\n"
"_VS_OUT vec4 oColor;\n"
"_VS_OUT vec2 oTexCoord0;\n"
"_VS_OUT vec2 oTexCoord1;\n"
"_VS_OUT vec2 oTexCoord2;\n"
"void main()\n"
"{\n"
" gl_Position.x = Position.x;\n"
" gl_Position.y = Position.y;\n"
" gl_Position.z = 0.0;\n"
" gl_Position.w = 1.0;\n"
// Vertex inputs are in TanEyeAngle space for the R,G,B channels (i.e. after chromatic aberration and distortion).
// These are now "real world" vectors in direction (x,y,1) relative to the eye of the HMD.
" vec3 TanEyeAngleR = vec3 ( TexCoord0.x, TexCoord0.y, 1.0 );\n"
" vec3 TanEyeAngleG = vec3 ( TexCoord1.x, TexCoord1.y, 1.0 );\n"
" vec3 TanEyeAngleB = vec3 ( TexCoord2.x, TexCoord2.y, 1.0 );\n"
// Accurate time warp lerp vs. faster
#if 1
// Apply the two 3x3 timewarp rotations to these vectors.
" vec3 TransformedRStart = (EyeRotationStart * vec4(TanEyeAngleR, 0)).xyz;\n"
" vec3 TransformedGStart = (EyeRotationStart * vec4(TanEyeAngleG, 0)).xyz;\n"
" vec3 TransformedBStart = (EyeRotationStart * vec4(TanEyeAngleB, 0)).xyz;\n"
" vec3 TransformedREnd = (EyeRotationEnd * vec4(TanEyeAngleR, 0)).xyz;\n"
" vec3 TransformedGEnd = (EyeRotationEnd * vec4(TanEyeAngleG, 0)).xyz;\n"
" vec3 TransformedBEnd = (EyeRotationEnd * vec4(TanEyeAngleB, 0)).xyz;\n"
// And blend between them.
" vec3 TransformedR = mix ( TransformedRStart, TransformedREnd, Color.a );\n"
" vec3 TransformedG = mix ( TransformedGStart, TransformedGEnd, Color.a );\n"
" vec3 TransformedB = mix ( TransformedBStart, TransformedBEnd, Color.a );\n"
#else
" mat3 EyeRotation;\n"
" EyeRotation[0] = mix ( EyeRotationStart[0], EyeRotationEnd[0], Color.a ).xyz;\n"
" EyeRotation[1] = mix ( EyeRotationStart[1], EyeRotationEnd[1], Color.a ).xyz;\n"
" EyeRotation[2] = mix ( EyeRotationStart[2], EyeRotationEnd[2], Color.a ).xyz;\n"
" vec3 TransformedR = EyeRotation * TanEyeAngleR;\n"
" vec3 TransformedG = EyeRotation * TanEyeAngleG;\n"
" vec3 TransformedB = EyeRotation * TanEyeAngleB;\n"
#endif
// Project them back onto the Z=1 plane of the rendered images.
" float RecipZR = 1.0 / TransformedR.z;\n"
" float RecipZG = 1.0 / TransformedG.z;\n"
" float RecipZB = 1.0 / TransformedB.z;\n"
" vec2 FlattenedR = vec2 ( TransformedR.x * RecipZR, TransformedR.y * RecipZR );\n"
" vec2 FlattenedG = vec2 ( TransformedG.x * RecipZG, TransformedG.y * RecipZG );\n"
" vec2 FlattenedB = vec2 ( TransformedB.x * RecipZB, TransformedB.y * RecipZB );\n"
// These are now still in TanEyeAngle space.
// Scale them into the correct [0-1],[0-1] UV lookup space (depending on eye)
" vec2 SrcCoordR = FlattenedR * EyeToSourceUVScale + EyeToSourceUVOffset;\n"
" vec2 SrcCoordG = FlattenedG * EyeToSourceUVScale + EyeToSourceUVOffset;\n"
" vec2 SrcCoordB = FlattenedB * EyeToSourceUVScale + EyeToSourceUVOffset;\n"
" oTexCoord0 = SrcCoordR;\n"
" oTexCoord0.y = 1.0-oTexCoord0.y;\n"
" oTexCoord1 = SrcCoordG;\n"
" oTexCoord1.y = 1.0-oTexCoord1.y;\n"
" oTexCoord2 = SrcCoordB;\n"
" oTexCoord2.y = 1.0-oTexCoord2.y;\n"
" oColor = vec4(Color.r, Color.r, Color.r, Color.r);\n" // Used for vignette fade.
"}\n";
const OVR::CAPI::GL::ShaderBase::Uniform DistortionTimewarpChroma_vs_refl[] =
{
{ "EyeToSourceUVScale", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 0, 8 },
{ "EyeToSourceUVOffset", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 8, 8 },
{ "EyeRotationStart", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 16, 64 },
{ "EyeRotationEnd", OVR::CAPI::GL::ShaderBase::VARTYPE_FLOAT, 80, 64 },
};
}}} // OVR::CAPI::GL
#endif // OVR_CAPI_GL_Shaders_h

View File

@ -0,0 +1,530 @@
/************************************************************************************
Filename : CAPI_GL_Util.cpp
Content : GL extension hooks and utility implementation
Created : September 10, 2012
Authors : David Borel, Andrew Reisse
Copyright : Copyright 2012 Oculus VR, Inc. All Rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
************************************************************************************/
#include "CAPI_GL_Util.h"
#include "../../Kernel/OVR_Log.h"
#include <string.h>
namespace OVR { namespace CAPI { namespace GL {
// GL Hooks for non-Mac.
#if !defined(OVR_OS_MAC)
#if defined(OVR_OS_WIN32)
PFNWGLGETPROCADDRESS wglGetProcAddress;
PFNGLENABLEPROC glEnable;
PFNGLDISABLEPROC glDisable;
PFNGLGETFLOATVPROC glGetFloatv;
PFNGLGETINTEGERVPROC glGetIntegerv;
PFNGLGETSTRINGPROC glGetString;
PFNGLCOLORMASKPROC glColorMask;
PFNGLCLEARPROC glClear;
PFNGLCLEARCOLORPROC glClearColor;
PFNGLCLEARDEPTHPROC glClearDepth;
PFNGLVIEWPORTPROC glViewport;
PFNGLDRAWELEMENTSPROC glDrawElements;
PFNGLTEXPARAMETERIPROC glTexParameteri;
PFNGLFLUSHPROC glFlush;
PFNGLFINISHPROC glFinish;
PFNGLDRAWARRAYSPROC glDrawArrays;
PFNGLGENTEXTURESPROC glGenTextures;
PFNGLDELETETEXTURESPROC glDeleteTextures;
PFNGLBINDTEXTUREPROC glBindTexture;
PFNWGLGETSWAPINTERVALEXTPROC wglGetSwapIntervalEXT;
PFNWGLSWAPINTERVALEXTPROC wglSwapIntervalEXT;
#elif defined(OVR_OS_LINUX)
PFNGLXSWAPINTERVALEXTPROC glXSwapIntervalEXT;
#endif
PFNGLDELETESHADERPROC glDeleteShader;
PFNGLBINDFRAMEBUFFERPROC glBindFramebuffer;
PFNGLACTIVETEXTUREPROC glActiveTexture;
PFNGLDISABLEVERTEXATTRIBARRAYPROC glDisableVertexAttribArray;
PFNGLVERTEXATTRIBPOINTERPROC glVertexAttribPointer;
PFNGLENABLEVERTEXATTRIBARRAYPROC glEnableVertexAttribArray;
PFNGLBINDBUFFERPROC glBindBuffer;
PFNGLUNIFORMMATRIX3FVPROC glUniformMatrix3fv;
PFNGLUNIFORMMATRIX4FVPROC glUniformMatrix4fv;
PFNGLDELETEBUFFERSPROC glDeleteBuffers;
PFNGLBUFFERDATAPROC glBufferData;
PFNGLGENBUFFERSPROC glGenBuffers;
PFNGLMAPBUFFERPROC glMapBuffer;
PFNGLUNMAPBUFFERPROC glUnmapBuffer;
PFNGLGETSHADERINFOLOGPROC glGetShaderInfoLog;
PFNGLGETSHADERIVPROC glGetShaderiv;
PFNGLCOMPILESHADERPROC glCompileShader;
PFNGLSHADERSOURCEPROC glShaderSource;
PFNGLCREATESHADERPROC glCreateShader;
PFNGLCREATEPROGRAMPROC glCreateProgram;
PFNGLATTACHSHADERPROC glAttachShader;
PFNGLDETACHSHADERPROC glDetachShader;
PFNGLDELETEPROGRAMPROC glDeleteProgram;
PFNGLUNIFORM1IPROC glUniform1i;
PFNGLGETUNIFORMLOCATIONPROC glGetUniformLocation;
PFNGLGETACTIVEUNIFORMPROC glGetActiveUniform;
PFNGLUSEPROGRAMPROC glUseProgram;
PFNGLGETPROGRAMINFOLOGPROC glGetProgramInfoLog;
PFNGLGETPROGRAMIVPROC glGetProgramiv;
PFNGLLINKPROGRAMPROC glLinkProgram;
PFNGLBINDATTRIBLOCATIONPROC glBindAttribLocation;
PFNGLGETATTRIBLOCATIONPROC glGetAttribLocation;
PFNGLUNIFORM4FVPROC glUniform4fv;
PFNGLUNIFORM3FVPROC glUniform3fv;
PFNGLUNIFORM2FVPROC glUniform2fv;
PFNGLUNIFORM1FVPROC glUniform1fv;
PFNGLGENVERTEXARRAYSPROC glGenVertexArrays;
PFNGLDELETEVERTEXARRAYSPROC glDeleteVertexArrays;
PFNGLBINDVERTEXARRAYPROC glBindVertexArray;
#if defined(OVR_OS_WIN32)
void* GetFunction(const char* functionName)
{
return wglGetProcAddress(functionName);
}
#else
void (*GetFunction(const char *functionName))( void )
{
return glXGetProcAddress((GLubyte*)functionName);
}
#endif
void InitGLExtensions()
{
if (glGenVertexArrays)
return;
#if defined(OVR_OS_WIN32)
HINSTANCE hInst = LoadLibrary(L"Opengl32.dll");
if (!hInst)
return;
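// GL 1.1 core entry points are exported directly from Opengl32.dll and must be
// fetched with GetProcAddress; wglGetProcAddress only returns extension and
// newer-version functions.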
glGetFloatv = (PFNGLGETFLOATVPROC) GetProcAddress(hInst, "glGetFloatv");
glGetIntegerv = (PFNGLGETINTEGERVPROC) GetProcAddress(hInst, "glGetIntegerv");
glGetString = (PFNGLGETSTRINGPROC) GetProcAddress(hInst, "glGetString");
glEnable = (PFNGLENABLEPROC) GetProcAddress(hInst, "glEnable");
glDisable = (PFNGLDISABLEPROC) GetProcAddress(hInst, "glDisable");
glColorMask = (PFNGLCOLORMASKPROC) GetProcAddress(hInst, "glColorMask");
glClear = (PFNGLCLEARPROC) GetProcAddress(hInst, "glClear" );
glClearColor = (PFNGLCLEARCOLORPROC) GetProcAddress(hInst, "glClearColor");
glClearDepth = (PFNGLCLEARDEPTHPROC) GetProcAddress(hInst, "glClearDepth");
glViewport = (PFNGLVIEWPORTPROC) GetProcAddress(hInst, "glViewport");
glFlush = (PFNGLFLUSHPROC) GetProcAddress(hInst, "glFlush");
glFinish = (PFNGLFINISHPROC) GetProcAddress(hInst, "glFinish");
glDrawArrays = (PFNGLDRAWARRAYSPROC) GetProcAddress(hInst, "glDrawArrays");
glDrawElements = (PFNGLDRAWELEMENTSPROC) GetProcAddress(hInst, "glDrawElements");
glGenTextures = (PFNGLGENTEXTURESPROC) GetProcAddress(hInst,"glGenTextures");
glDeleteTextures = (PFNGLDELETETEXTURESPROC) GetProcAddress(hInst,"glDeleteTextures");
glBindTexture = (PFNGLBINDTEXTUREPROC) GetProcAddress(hInst,"glBindTexture");
glTexParameteri = (PFNGLTEXPARAMETERIPROC) GetProcAddress(hInst, "glTexParameteri");
wglGetProcAddress = (PFNWGLGETPROCADDRESS) GetProcAddress(hInst, "wglGetProcAddress");
wglGetSwapIntervalEXT = (PFNWGLGETSWAPINTERVALEXTPROC) GetFunction("wglGetSwapIntervalEXT");
wglSwapIntervalEXT = (PFNWGLSWAPINTERVALEXTPROC) GetFunction("wglSwapIntervalEXT");
#elif defined(OVR_OS_LINUX)
glXSwapIntervalEXT = (PFNGLXSWAPINTERVALEXTPROC) GetFunction("glXSwapIntervalEXT");
#endif
glBindFramebuffer = (PFNGLBINDFRAMEBUFFERPROC) GetFunction("glBindFramebufferEXT");
glGenVertexArrays = (PFNGLGENVERTEXARRAYSPROC) GetFunction("glGenVertexArrays");
glDeleteVertexArrays = (PFNGLDELETEVERTEXARRAYSPROC) GetFunction("glDeleteVertexArrays");
glBindVertexArray = (PFNGLBINDVERTEXARRAYPROC) GetFunction("glBindVertexArray");
glGenBuffers = (PFNGLGENBUFFERSPROC) GetFunction("glGenBuffers");
glDeleteBuffers = (PFNGLDELETEBUFFERSPROC) GetFunction("glDeleteBuffers");
glBindBuffer = (PFNGLBINDBUFFERPROC) GetFunction("glBindBuffer");
glBufferData = (PFNGLBUFFERDATAPROC) GetFunction("glBufferData");
glMapBuffer = (PFNGLMAPBUFFERPROC) GetFunction("glMapBuffer");
glUnmapBuffer = (PFNGLUNMAPBUFFERPROC) GetFunction("glUnmapBuffer");
glDisableVertexAttribArray = (PFNGLDISABLEVERTEXATTRIBARRAYPROC) GetFunction("glDisableVertexAttribArray");
glVertexAttribPointer = (PFNGLVERTEXATTRIBPOINTERPROC) GetFunction("glVertexAttribPointer");
glEnableVertexAttribArray = (PFNGLENABLEVERTEXATTRIBARRAYPROC) GetFunction("glEnableVertexAttribArray");
glActiveTexture = (PFNGLACTIVETEXTUREPROC) GetFunction("glActiveTexture");
glUniformMatrix3fv = (PFNGLUNIFORMMATRIX3FVPROC) GetFunction("glUniformMatrix3fv");
glUniformMatrix4fv = (PFNGLUNIFORMMATRIX4FVPROC) GetFunction("glUniformMatrix4fv");
glUniform1i = (PFNGLUNIFORM1IPROC) GetFunction("glUniform1i");
glUniform1fv = (PFNGLUNIFORM1FVPROC) GetFunction("glUniform1fv");
glUniform2fv = (PFNGLUNIFORM2FVPROC) GetFunction("glUniform2fv");
glUniform3fv = (PFNGLUNIFORM3FVPROC) GetFunction("glUniform3fv");
glUniform4fv = (PFNGLUNIFORM4FVPROC) GetFunction("glUniform4fv");
glGetUniformLocation = (PFNGLGETUNIFORMLOCATIONPROC) GetFunction("glGetUniformLocation");
glGetActiveUniform = (PFNGLGETACTIVEUNIFORMPROC) GetFunction("glGetActiveUniform");
glGetShaderInfoLog = (PFNGLGETSHADERINFOLOGPROC) GetFunction("glGetShaderInfoLog");
glGetShaderiv = (PFNGLGETSHADERIVPROC) GetFunction("glGetShaderiv");
glCompileShader = (PFNGLCOMPILESHADERPROC) GetFunction("glCompileShader");
glShaderSource = (PFNGLSHADERSOURCEPROC) GetFunction("glShaderSource");
glCreateShader = (PFNGLCREATESHADERPROC) GetFunction("glCreateShader");
glDeleteShader = (PFNGLDELETESHADERPROC) GetFunction("glDeleteShader");
glCreateProgram = (PFNGLCREATEPROGRAMPROC) GetFunction("glCreateProgram");
glDeleteProgram = (PFNGLDELETEPROGRAMPROC) GetFunction("glDeleteProgram");
glUseProgram = (PFNGLUSEPROGRAMPROC) GetFunction("glUseProgram");
glGetProgramInfoLog = (PFNGLGETPROGRAMINFOLOGPROC) GetFunction("glGetProgramInfoLog");
glGetProgramiv = (PFNGLGETPROGRAMIVPROC) GetFunction("glGetProgramiv");
glLinkProgram = (PFNGLLINKPROGRAMPROC) GetFunction("glLinkProgram");
glAttachShader = (PFNGLATTACHSHADERPROC) GetFunction("glAttachShader");
glDetachShader = (PFNGLDETACHSHADERPROC) GetFunction("glDetachShader");
glBindAttribLocation = (PFNGLBINDATTRIBLOCATIONPROC) GetFunction("glBindAttribLocation");
glGetAttribLocation = (PFNGLGETATTRIBLOCATIONPROC) GetFunction("glGetAttribLocation");
}
#endif
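// A minimal call-site sketch (window/context creation assumed to happen
// elsewhere, e.g. via wglMakeCurrent or glXMakeCurrent):
//
//   // ... create window and GL context, make the context current ...
//   OVR::CAPI::GL::InitGLExtensions();
//   // The entry points declared above are now safe to call.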
Buffer::Buffer(RenderParams* rp) : pParams(rp), Size(0), Use(0), GLBuffer(0)
{
}
Buffer::~Buffer()
{
if (GLBuffer)
glDeleteBuffers(1, &GLBuffer);
}
bool Buffer::Data(int use, const void* buffer, size_t size)
{
Size = size;
switch (use & Buffer_TypeMask)
{
case Buffer_Index: Use = GL_ELEMENT_ARRAY_BUFFER; break;
default: Use = GL_ARRAY_BUFFER; break;
}
if (!GLBuffer)
glGenBuffers(1, &GLBuffer);
int mode = GL_DYNAMIC_DRAW;
if (use & Buffer_ReadOnly)
mode = GL_STATIC_DRAW;
glBindBuffer(Use, GLBuffer);
glBufferData(Use, size, buffer, mode);
return 1;
}
void* Buffer::Map(size_t, size_t, int)
{
int mode = GL_WRITE_ONLY;
//if (flags & Map_Unsynchronized)
// mode |= GL_MAP_UNSYNCHRONIZED;
glBindBuffer(Use, GLBuffer);
void* v = glMapBuffer(Use, mode);
return v;
}
bool Buffer::Unmap(void*)
{
glBindBuffer(Use, GLBuffer);
int r = glUnmapBuffer(Use);
return r != 0;
}
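// A minimal Buffer usage sketch (names such as renderParams, indices and
// vertexBytes are hypothetical): upload index data once, then stream vertex
// data through Map/Unmap:
//
//   Buffer indexBuf(&renderParams);
//   indexBuf.Data(Buffer_Index | Buffer_ReadOnly, indices, sizeof(indices));
//
//   Buffer vertexBuf(&renderParams);
//   vertexBuf.Data(Buffer_Vertex, NULL, vertexBytes); // allocate storage only
//   void* p = vertexBuf.Map(0, vertexBytes);
//   memcpy(p, vertices, vertexBytes);
//   vertexBuf.Unmap(p);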
ShaderSet::ShaderSet()
{
Prog = glCreateProgram();
}
ShaderSet::~ShaderSet()
{
glDeleteProgram(Prog);
}
GLint ShaderSet::GetGLShader(Shader* s)
{
switch (s->Stage)
{
case Shader_Vertex: {
ShaderImpl<Shader_Vertex, GL_VERTEX_SHADER>* gls = (ShaderImpl<Shader_Vertex, GL_VERTEX_SHADER>*)s;
return gls->GLShader;
} break;
case Shader_Fragment: {
ShaderImpl<Shader_Fragment, GL_FRAGMENT_SHADER>* gls = (ShaderImpl<Shader_Fragment, GL_FRAGMENT_SHADER>*)s;
return gls->GLShader;
} break;
default: break;
}
return -1;
}
void ShaderSet::SetShader(Shader *s)
{
Shaders[s->Stage] = s;
GLint GLShader = GetGLShader(s);
glAttachShader(Prog, GLShader);
if (Shaders[Shader_Vertex] && Shaders[Shader_Fragment])
Link();
}
void ShaderSet::UnsetShader(int stage)
{
if (Shaders[stage] == NULL)
return;
GLint GLShader = GetGLShader(Shaders[stage]);
glDetachShader(Prog, GLShader);
Shaders[stage] = NULL;
}
bool ShaderSet::SetUniform(const char* name, int n, const float* v)
{
for (unsigned int i = 0; i < UniformInfo.GetSize(); i++)
if (!strcmp(UniformInfo[i].Name.ToCStr(), name))
{
OVR_ASSERT(UniformInfo[i].Location >= 0);
glUseProgram(Prog);
switch (UniformInfo[i].Type)
{
case 1: glUniform1fv(UniformInfo[i].Location, n, v); break;
case 2: glUniform2fv(UniformInfo[i].Location, n/2, v); break;
case 3: glUniform3fv(UniformInfo[i].Location, n/3, v); break;
case 4: glUniform4fv(UniformInfo[i].Location, n/4, v); break;
case 12: glUniformMatrix3fv(UniformInfo[i].Location, 1, 1, v); break;
case 16: glUniformMatrix4fv(UniformInfo[i].Location, 1, 1, v); break;
default: OVR_ASSERT(0);
}
return 1;
}
OVR_DEBUG_LOG(("Warning: uniform %s not present in selected shader", name));
return 0;
}
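// Link links the GL program and then rebuilds UniformInfo by reflection:
// each active float/vector/matrix uniform is recorded with its location so
// SetUniform can dispatch on element count (1-4 floats, 12 = mat3, 16 = mat4).
// Samplers named "Texture0".."Texture7" are bound to texture units 0..7.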
bool ShaderSet::Link()
{
glLinkProgram(Prog);
GLint r;
glGetProgramiv(Prog, GL_LINK_STATUS, &r);
if (!r)
{
GLchar msg[1024];
glGetProgramInfoLog(Prog, sizeof(msg), 0, msg);
OVR_DEBUG_LOG(("Linking shaders failed: %s\n", msg));
return 0;
}
glUseProgram(Prog);
UniformInfo.Clear();
LightingVer = 0;
UsesLighting = 0;
GLint uniformCount = 0;
glGetProgramiv(Prog, GL_ACTIVE_UNIFORMS, &uniformCount);
OVR_ASSERT(uniformCount >= 0);
for(GLuint i = 0; i < (GLuint)uniformCount; i++)
{
GLsizei namelen;
GLint size = 0;
GLenum type;
GLchar name[32];
glGetActiveUniform(Prog, i, sizeof(name), &namelen, &size, &type, name);
if (size)
{
int l = glGetUniformLocation(Prog, name);
char *np = name;
while (*np)
{
if (*np == '[')
*np = 0;
np++;
}
Uniform u;
u.Name = name;
u.Location = l;
u.Size = size;
switch (type)
{
case GL_FLOAT: u.Type = 1; break;
case GL_FLOAT_VEC2: u.Type = 2; break;
case GL_FLOAT_VEC3: u.Type = 3; break;
case GL_FLOAT_VEC4: u.Type = 4; break;
case GL_FLOAT_MAT3: u.Type = 12; break;
case GL_FLOAT_MAT4: u.Type = 16; break;
default:
continue;
}
UniformInfo.PushBack(u);
if (!strcmp(name, "LightCount"))
UsesLighting = 1;
}
else
break;
}
ProjLoc = glGetUniformLocation(Prog, "Proj");
ViewLoc = glGetUniformLocation(Prog, "View");
for (int i = 0; i < 8; i++)
{
char texv[32];
OVR_sprintf(texv, sizeof(texv), "Texture%d", i);
TexLoc[i] = glGetUniformLocation(Prog, texv);
if (TexLoc[i] < 0)
break;
glUniform1i(TexLoc[i], i);
}
if (UsesLighting)
OVR_ASSERT(ProjLoc >= 0 && ViewLoc >= 0);
return 1;
}
bool ShaderBase::SetUniform(const char* name, int n, const float* v)
{
for(unsigned i = 0; i < UniformReflSize; i++)
{
if (!strcmp(UniformRefl[i].Name, name))
{
memcpy(UniformData + UniformRefl[i].Offset, v, n * sizeof(float));
return 1;
}
}
return 0;
}
bool ShaderBase::SetUniformBool(const char* name, int n, const bool* v)
{
OVR_UNUSED(n);
for(unsigned i = 0; i < UniformReflSize; i++)
{
if (!strcmp(UniformRefl[i].Name, name))
{
memcpy(UniformData + UniformRefl[i].Offset, v, UniformRefl[i].Size);
return 1;
}
}
return 0;
}
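// InitUniforms caches the reflection table and allocates the shadow buffer
// that SetUniform/SetUniformBool copy into. The size computation assumes the
// reflection entries are ordered by offset, with the last entry ending the block.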
void ShaderBase::InitUniforms(const Uniform* refl, size_t reflSize)
{
if(!refl)
{
UniformRefl = NULL;
UniformReflSize = 0;
UniformsSize = 0;
if (UniformData)
{
OVR_FREE(UniformData);
UniformData = 0;
}
return; // no reflection data
}
UniformRefl = refl;
UniformReflSize = reflSize;
UniformsSize = UniformRefl[UniformReflSize-1].Offset + UniformRefl[UniformReflSize-1].Size;
UniformData = (unsigned char*)OVR_ALLOC(UniformsSize);
}
Texture::Texture(RenderParams* rp, int w, int h) : IsUserAllocated(true), pParams(rp), TexId(0), Width(w), Height(h)
{
if (w && h)
glGenTextures(1, &TexId);
}
Texture::~Texture()
{
if (TexId && !IsUserAllocated)
glDeleteTextures(1, &TexId);
}
void Texture::Set(int slot, ShaderStage) const
{
glActiveTexture(GL_TEXTURE0 + slot);
glBindTexture(GL_TEXTURE_2D, TexId);
}
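// SetSampleMode translates the Sample_* flag bits into glTexParameteri calls:
// the Sample_FilterMask bits pick the min/mag filter (and anisotropy), and
// the Sample_AddressMask bits pick the wrap mode for S and T.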
void Texture::SetSampleMode(int sm)
{
glBindTexture(GL_TEXTURE_2D, TexId);
switch (sm & Sample_FilterMask)
{
case Sample_Linear:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_ANISOTROPY_EXT, 1);
break;
case Sample_Anisotropic:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_ANISOTROPY_EXT, 8);
break;
case Sample_Nearest:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_ANISOTROPY_EXT, 1);
break;
}
switch (sm & Sample_AddressMask)
{
case Sample_Repeat:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
break;
case Sample_Clamp:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
break;
case Sample_ClampBorder:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
break;
}
}
void Texture::UpdatePlaceholderTexture(GLuint texId, const Sizei& textureSize)
{
if (!IsUserAllocated && TexId && texId != TexId)
glDeleteTextures(1, &TexId);
TexId = texId;
Width = textureSize.w;
Height = textureSize.h;
IsUserAllocated = true;
}
}}}

View File

@ -0,0 +1,537 @@
/************************************************************************************
Filename : CAPI_GL_Util.h
Content : Utility header for OpenGL
Created : March 27, 2014
Authors : Andrew Reisse, David Borel
Copyright : Copyright 2012 Oculus VR, Inc. All Rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
************************************************************************************/
#ifndef INC_OVR_CAPI_GL_Util_h
#define INC_OVR_CAPI_GL_Util_h
#include "../../OVR_CAPI.h"
#include "../../Kernel/OVR_Array.h"
#include "../../Kernel/OVR_Math.h"
#include "../../Kernel/OVR_RefCount.h"
#include "../../Kernel/OVR_String.h"
#include "../../Kernel/OVR_Types.h"
#include "../../Kernel/OVR_Log.h"
#if defined(OVR_OS_WIN32)
#include <Windows.h>
#endif
#if defined(OVR_OS_MAC)
#include <OpenGL/gl3.h>
#include <OpenGL/gl3ext.h>
#else
#ifndef GL_GLEXT_PROTOTYPES
#define GL_GLEXT_PROTOTYPES
#endif
#include <GL/gl.h>
#include <GL/glext.h>
#if defined(OVR_OS_WIN32)
#include <GL/wglext.h>
#elif defined(OVR_OS_LINUX)
#include <GL/glx.h>
#endif
#endif
namespace OVR { namespace CAPI { namespace GL {
// GL extension hooks for non-Mac platforms.
#if !defined(OVR_OS_MAC)
// Let Windows apps build without linking GL.
#if defined(OVR_OS_WIN32)
typedef void (__stdcall *PFNGLENABLEPROC) (GLenum);
typedef void (__stdcall *PFNGLDISABLEPROC) (GLenum);
typedef void (__stdcall *PFNGLGETFLOATVPROC) (GLenum, GLfloat*);
typedef const GLubyte * (__stdcall *PFNGLGETSTRINGPROC) (GLenum);
typedef void (__stdcall *PFNGLGETINTEGERVPROC) (GLenum, GLint*);
typedef PROC (__stdcall *PFNWGLGETPROCADDRESS) (LPCSTR);
typedef void (__stdcall *PFNGLFLUSHPROC) ();
typedef void (__stdcall *PFNGLFINISHPROC) ();
typedef void (__stdcall *PFNGLDRAWARRAYSPROC) (GLenum mode, GLint first, GLsizei count);
typedef void (__stdcall *PFNGLCLEARPROC) (GLbitfield);
typedef void (__stdcall *PFNGLCOLORMASKPROC) (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha);
typedef void (__stdcall *PFNGLDRAWELEMENTSPROC) (GLenum mode, GLsizei count, GLenum type, const GLvoid *indices);
typedef void (__stdcall *PFNGLGENTEXTURESPROC) (GLsizei n, GLuint *textures);
typedef void (__stdcall *PFNGLDELETETEXTURESPROC) (GLsizei n, const GLuint *textures);
typedef void (__stdcall *PFNGLBINDTEXTUREPROC) (GLenum target, GLuint texture);
typedef void (__stdcall *PFNGLCLEARCOLORPROC) (GLfloat r, GLfloat g, GLfloat b, GLfloat a);
typedef void (__stdcall *PFNGLCLEARDEPTHPROC) (GLclampd depth);
typedef void (__stdcall *PFNGLTEXPARAMETERIPROC) (GLenum target, GLenum pname, GLint param);
typedef void (__stdcall *PFNGLVIEWPORTPROC) (GLint x, GLint y, GLsizei width, GLsizei height);
extern PFNWGLGETPROCADDRESS wglGetProcAddress;
extern PFNWGLGETSWAPINTERVALEXTPROC wglGetSwapIntervalEXT;
extern PFNWGLSWAPINTERVALEXTPROC wglSwapIntervalEXT;
extern PFNGLENABLEPROC glEnable;
extern PFNGLDISABLEPROC glDisable;
extern PFNGLCOLORMASKPROC glColorMask;
extern PFNGLGETFLOATVPROC glGetFloatv;
extern PFNGLGETSTRINGPROC glGetString;
extern PFNGLGETINTEGERVPROC glGetIntegerv;
extern PFNGLCLEARPROC glClear;
extern PFNGLCLEARCOLORPROC glClearColor;
extern PFNGLCLEARDEPTHPROC glClearDepth;
extern PFNGLVIEWPORTPROC glViewport;
extern PFNGLDRAWARRAYSPROC glDrawArrays;
extern PFNGLDRAWELEMENTSPROC glDrawElements;
extern PFNGLGENTEXTURESPROC glGenTextures;
extern PFNGLDELETETEXTURESPROC glDeleteTextures;
extern PFNGLBINDTEXTUREPROC glBindTexture;
extern PFNGLTEXPARAMETERIPROC glTexParameteri;
extern PFNGLFLUSHPROC glFlush;
extern PFNGLFINISHPROC glFinish;
#elif defined(OVR_OS_LINUX)
extern PFNGLXSWAPINTERVALEXTPROC glXSwapIntervalEXT;
#endif // defined(OVR_OS_WIN32)
extern PFNGLDELETESHADERPROC glDeleteShader;
extern PFNGLBINDFRAMEBUFFERPROC glBindFramebuffer;
extern PFNGLACTIVETEXTUREPROC glActiveTexture;
extern PFNGLDISABLEVERTEXATTRIBARRAYPROC glDisableVertexAttribArray;
extern PFNGLVERTEXATTRIBPOINTERPROC glVertexAttribPointer;
extern PFNGLENABLEVERTEXATTRIBARRAYPROC glEnableVertexAttribArray;
extern PFNGLBINDBUFFERPROC glBindBuffer;
extern PFNGLUNIFORMMATRIX4FVPROC glUniformMatrix4fv;
extern PFNGLDELETEBUFFERSPROC glDeleteBuffers;
extern PFNGLBUFFERDATAPROC glBufferData;
extern PFNGLGENBUFFERSPROC glGenBuffers;
extern PFNGLMAPBUFFERPROC glMapBuffer;
extern PFNGLUNMAPBUFFERPROC glUnmapBuffer;
extern PFNGLGETSHADERINFOLOGPROC glGetShaderInfoLog;
extern PFNGLGETSHADERIVPROC glGetShaderiv;
extern PFNGLCOMPILESHADERPROC glCompileShader;
extern PFNGLSHADERSOURCEPROC glShaderSource;
extern PFNGLCREATESHADERPROC glCreateShader;
extern PFNGLCREATEPROGRAMPROC glCreateProgram;
extern PFNGLATTACHSHADERPROC glAttachShader;
extern PFNGLDETACHSHADERPROC glDetachShader;
extern PFNGLDELETEPROGRAMPROC glDeleteProgram;
extern PFNGLUNIFORM1IPROC glUniform1i;
extern PFNGLGETUNIFORMLOCATIONPROC glGetUniformLocation;
extern PFNGLGETACTIVEUNIFORMPROC glGetActiveUniform;
extern PFNGLUSEPROGRAMPROC glUseProgram;
extern PFNGLGETPROGRAMINFOLOGPROC glGetProgramInfoLog;
extern PFNGLGETPROGRAMIVPROC glGetProgramiv;
extern PFNGLLINKPROGRAMPROC glLinkProgram;
extern PFNGLBINDATTRIBLOCATIONPROC glBindAttribLocation;
extern PFNGLGETATTRIBLOCATIONPROC glGetAttribLocation;
extern PFNGLUNIFORM4FVPROC glUniform4fv;
extern PFNGLUNIFORM3FVPROC glUniform3fv;
extern PFNGLUNIFORM2FVPROC glUniform2fv;
extern PFNGLUNIFORM1FVPROC glUniform1fv;
extern PFNGLGENVERTEXARRAYSPROC glGenVertexArrays;
extern PFNGLDELETEVERTEXARRAYSPROC glDeleteVertexArrays;
extern PFNGLBINDVERTEXARRAYPROC glBindVertexArray;
extern void InitGLExtensions();
#endif // !defined(OVR_OS_MAC)
// Rendering primitive type used to render Model.
enum PrimitiveType
{
Prim_Triangles,
Prim_Lines,
Prim_TriangleStrip,
Prim_Unknown,
Prim_Count
};
// Types of shaders that can be stored together in a ShaderSet.
enum ShaderStage
{
Shader_Vertex = 0,
Shader_Fragment = 2,
Shader_Pixel = 2,
Shader_Count = 3,
};
enum MapFlags
{
Map_Discard = 1,
Map_Read = 2, // do not use
Map_Unsynchronized = 4, // like D3D11_MAP_NO_OVERWRITE
};
// Buffer types used for uploading geometry & constants.
enum BufferUsage
{
Buffer_Unknown = 0,
Buffer_Vertex = 1,
Buffer_Index = 2,
Buffer_Uniform = 4,
Buffer_TypeMask = 0xff,
Buffer_ReadOnly = 0x100, // Buffer must be created with Data().
};
enum TextureFormat
{
Texture_RGBA = 0x0100,
Texture_Depth = 0x8000,
Texture_TypeMask = 0xff00,
Texture_SamplesMask = 0x00ff,
Texture_RenderTarget = 0x10000,
Texture_GenMipmaps = 0x20000,
};
// Texture sampling modes.
enum SampleMode
{
Sample_Linear = 0,
Sample_Nearest = 1,
Sample_Anisotropic = 2,
Sample_FilterMask = 3,
Sample_Repeat = 0,
Sample_Clamp = 4,
Sample_ClampBorder = 8, // If unsupported Clamp is used instead.
Sample_AddressMask =12,
Sample_Count =13,
};
// Rendering parameters/pointers describing GL rendering setup.
struct RenderParams
{
#if defined(OVR_OS_WIN32)
HWND Window;
#elif defined(OVR_OS_LINUX)
Display* Disp;
Window Win;
#endif
ovrSizei RTSize;
int Multisample;
};
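// Buffer wraps a GL vertex/index buffer object; data is uploaded through
// Data() and can be rewritten through Map()/Unmap().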
class Buffer : public RefCountBase<Buffer>
{
public:
RenderParams* pParams;
size_t Size;
GLenum Use;
GLuint GLBuffer;
public:
Buffer(RenderParams* r);
~Buffer();
GLuint GetBuffer() { return GLBuffer; }
virtual size_t GetSize() { return Size; }
virtual void* Map(size_t start, size_t size, int flags = 0);
virtual bool Unmap(void *m);
virtual bool Data(int use, const void* buffer, size_t size);
};
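// Texture wraps a GL texture object. For distortion rendering it typically
// aliases an application-created texture via UpdatePlaceholderTexture rather
// than owning its own storage.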
class Texture : public RefCountBase<Texture>
{
bool IsUserAllocated;
public:
RenderParams* pParams;
GLuint TexId;
int Width, Height;
Texture(RenderParams* rp, int w, int h);
~Texture();
virtual int GetWidth() const { return Width; }
virtual int GetHeight() const { return Height; }
virtual void SetSampleMode(int sm);
// Updates texture to point to specified resources
// - used for slave rendering.
void UpdatePlaceholderTexture(GLuint texId,
const Sizei& textureSize);
virtual void Set(int slot, ShaderStage stage = Shader_Fragment) const;
};
// Base class for vertex and pixel shaders. Stored in ShaderSet.
class Shader : public RefCountBase<Shader>
{
friend class ShaderSet;
protected:
ShaderStage Stage;
public:
Shader(ShaderStage s) : Stage(s) {}
virtual ~Shader() {}
ShaderStage GetStage() const { return Stage; }
virtual void Set(PrimitiveType) const { }
virtual void SetUniformBuffer(class Buffer* buffers, int i = 0) { OVR_UNUSED2(buffers, i); }
protected:
virtual bool SetUniform(const char* name, int n, const float* v) { OVR_UNUSED3(name, n, v); return false; }
virtual bool SetUniformBool(const char* name, int n, const bool* v) { OVR_UNUSED3(name, n, v); return false; }
};
// A group of shaders, one per stage.
// A ShaderSet is applied for rendering with a given fill.
class ShaderSet : public RefCountBase<ShaderSet>
{
protected:
Ptr<Shader> Shaders[Shader_Count];
struct Uniform
{
String Name;
int Location, Size;
int Type; // currently number of floats in vector
};
Array<Uniform> UniformInfo;
public:
GLuint Prog;
GLint ProjLoc, ViewLoc;
GLint TexLoc[8];
bool UsesLighting;
int LightingVer;
ShaderSet();
~ShaderSet();
virtual void SetShader(Shader *s);
virtual void UnsetShader(int stage);
Shader* GetShader(int stage) { return Shaders[stage]; }
virtual void Set(PrimitiveType prim) const
{
glUseProgram(Prog);
for (int i = 0; i < Shader_Count; i++)
if (Shaders[i])
Shaders[i]->Set(prim);
}
// Set a uniform (other than the standard matrices). It is undefined whether the
// uniforms from one shader occupy the same space as those in other shaders
// (unless a buffer is used, then each buffer is independent).
virtual bool SetUniform(const char* name, int n, const float* v);
bool SetUniform1f(const char* name, float x)
{
const float v[] = {x};
return SetUniform(name, 1, v);
}
bool SetUniform2f(const char* name, float x, float y)
{
const float v[] = {x,y};
return SetUniform(name, 2, v);
}
bool SetUniform3f(const char* name, float x, float y, float z)
{
const float v[] = {x,y,z};
return SetUniform(name, 3, v);
}
bool SetUniform4f(const char* name, float x, float y, float z, float w = 1)
{
const float v[] = {x,y,z,w};
return SetUniform(name, 4, v);
}
bool SetUniformv(const char* name, const Vector3f& v)
{
const float a[] = {v.x,v.y,v.z,1};
return SetUniform(name, 4, a);
}
virtual bool SetUniform4x4f(const char* name, const Matrix4f& m)
{
Matrix4f mt = m.Transposed();
return SetUniform(name, 16, &mt.M[0][0]);
}
protected:
GLint GetGLShader(Shader* s);
bool Link();
};
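// A minimal ShaderSet usage sketch (shader sources vsSrc/fsSrc, params and
// projMatrix are hypothetical; NULL reflection tables are permitted):
//
//   Ptr<ShaderSet> shaders = *new ShaderSet;
//   shaders->SetShader(new VertexShader(&params, (void*)vsSrc, 0, NULL, 0));
//   shaders->SetShader(new FragmentShader(&params, (void*)fsSrc, 0, NULL, 0));
//   // Link() runs once both stages are present; uniforms can now be set:
//   shaders->SetUniform4x4f("Proj", projMatrix);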
// Fill combines a ShaderSet (vertex, pixel) with textures, if any.
// Every model has a fill.
class ShaderFill : public RefCountBase<ShaderFill>
{
Ptr<ShaderSet> Shaders;
Ptr<class Texture> Textures[8];
void* InputLayout; // HACK this should be abstracted
public:
ShaderFill(ShaderSet* sh) : Shaders(sh) { InputLayout = NULL; }
ShaderFill(ShaderSet& sh) : Shaders(sh) { InputLayout = NULL; }
ShaderSet* GetShaders() const { return Shaders; }
void* GetInputLayout() const { return InputLayout; }
virtual void Set(PrimitiveType prim = Prim_Unknown) const {
Shaders->Set(prim);
for(int i = 0; i < 8; i++)
{
if(Textures[i])
{
Textures[i]->Set(i);
}
}
}
virtual void SetTexture(int i, class Texture* tex) { if (i < 8) Textures[i] = tex; }
};
struct DisplayId
{
// Windows
String MonitorName; // Monitor name for fullscreen mode
// MacOS
long CgDisplayId; // CGDirectDisplayID
DisplayId() : CgDisplayId(0) {}
DisplayId(long id) : CgDisplayId(id) {}
DisplayId(String m, long id=0) : MonitorName(m), CgDisplayId(id) {}
operator bool () const
{
return MonitorName.GetLength() || CgDisplayId;
}
bool operator== (const DisplayId& b) const
{
return CgDisplayId == b.CgDisplayId &&
(strstr(MonitorName.ToCStr(), b.MonitorName.ToCStr()) ||
strstr(b.MonitorName.ToCStr(), MonitorName.ToCStr()));
}
};
class ShaderBase : public Shader
{
public:
RenderParams* pParams;
unsigned char* UniformData;
int UniformsSize;
enum VarType
{
VARTYPE_FLOAT,
VARTYPE_INT,
VARTYPE_BOOL,
};
struct Uniform
{
const char* Name;
VarType Type;
int Offset, Size;
};
const Uniform* UniformRefl;
size_t UniformReflSize;
ShaderBase(RenderParams* rp, ShaderStage stage) : Shader(stage), pParams(rp), UniformData(0), UniformsSize(0) {}
~ShaderBase()
{
if (UniformData)
OVR_FREE(UniformData);
}
void InitUniforms(const Uniform* refl, size_t reflSize);
bool SetUniform(const char* name, int n, const float* v);
bool SetUniformBool(const char* name, int n, const bool* v);
};
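// ShaderImpl ties a shader stage to its GL object type (e.g. GL_VERTEX_SHADER)
// and compiles the GLSL source passed to the constructor; Compile() logs the
// shader info log on failure.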
template<ShaderStage SStage, GLenum SType>
class ShaderImpl : public ShaderBase
{
friend class ShaderSet;
public:
ShaderImpl(RenderParams* rp, void* s, size_t size, const Uniform* refl, size_t reflSize)
: ShaderBase(rp, SStage)
, GLShader(0)
{
OVR_UNUSED(size);
bool success = Compile((const char*) s);
OVR_ASSERT(success);
OVR_UNUSED(success); // avoid an unused-variable warning when asserts compile out
InitUniforms(refl, reflSize);
}
~ShaderImpl()
{
if (GLShader)
{
glDeleteShader(GLShader);
GLShader = 0;
}
}
bool Compile(const char* src)
{
if (!GLShader)
GLShader = glCreateShader(GLStage());
glShaderSource(GLShader, 1, &src, 0);
glCompileShader(GLShader);
GLint r;
glGetShaderiv(GLShader, GL_COMPILE_STATUS, &r);
if (!r)
{
GLchar msg[1024];
glGetShaderInfoLog(GLShader, sizeof(msg), 0, msg);
if (msg[0])
OVR_DEBUG_LOG(("Compiling shader\n%s\nfailed: %s\n", src, msg));
return 0;
}
return 1;
}
GLenum GLStage() const
{
return SType;
}
private:
GLuint GLShader;
};
typedef ShaderImpl<Shader_Vertex, GL_VERTEX_SHADER> VertexShader;
typedef ShaderImpl<Shader_Fragment, GL_FRAGMENT_SHADER> FragmentShader;
}}}
#endif // INC_OVR_CAPI_GL_Util_h