/*=========================================================================
*
* Copyright NumFOCUS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
#ifndef itkVideoStream_h
#define itkVideoStream_h
#include "itkTemporalDataObject.h"
#include "itkImage.h"
namespace itk
{
/**
* \class VideoStream
* \brief A DataObject that holds a buffered portion of a video
*
* The function of VideoStream is to provide an Image-specific subclass of
* TemporalDataObject. It provides several convenient typedefs to get common
* attributes of the frames. Additionally, the VideoStream caches meta
* information (largest/requested/buffered spatial regions, origin, spacing,
* and direction) about all frames in the video, even those not currently
 * buffered. This is done primarily so that meta data can be set before the
 * corresponding frame is buffered.
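 *
 * For example (a minimal, illustrative sketch; \c videoStream is assumed to be
 * an already-constructed instance and the frame number and spacing values are
 * arbitrary), per-frame meta data can be cached even for a frame that has not
 * been buffered yet:
 *
   \code
      using VideoStreamType = itk::VideoStream< itk::Image< unsigned char, 2 > >;
      VideoStreamType::SpacingType spacing;
      spacing.Fill( 1.0 );
      videoStream->SetFrameSpacing( 5, spacing ); // frame 5 need not be buffered yet
   \endcode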
*
* \ingroup ITKVideoCore
*/
template <typename TFrameType>
class ITK_TEMPLATE_EXPORT VideoStream : public TemporalDataObject
{
public:
ITK_DISALLOW_COPY_AND_MOVE(VideoStream);
/** Standard class type aliases */
using Self = VideoStream;
using Superclass = TemporalDataObject;
using Pointer = SmartPointer<Self>;
using ConstPointer = SmartPointer<const Self>;
using ConstWeakPointer = WeakPointer<const Self>;
using FrameType = TFrameType;
using FramePointer = typename FrameType::Pointer;
using FrameConstPointer = typename FrameType::ConstPointer;
using typename Superclass::BufferType;
using SpatialRegionType = typename FrameType::RegionType;
using IndexType = typename FrameType::IndexType;
using PixelType = typename FrameType::PixelType;
using PointType = typename FrameType::PointType;
using SpacingType = typename FrameType::SpacingType;
using SizeType = typename FrameType::SizeType;
using DirectionType = typename FrameType::DirectionType;
using NumberOfComponentsPerPixelType = unsigned int;
  /** Types used to store the mapping between frame numbers and frame meta data */
using SpatialRegionMapType = typename std::map<SizeValueType, SpatialRegionType>;
using PointMapType = typename std::map<SizeValueType, PointType>;
using DirectionMapType = typename std::map<SizeValueType, DirectionType>;
using SpacingMapType = typename std::map<SizeValueType, SpacingType>;
using NumberOfComponentsPerPixelMapType = typename std::map<SizeValueType, NumberOfComponentsPerPixelType>;
  /** Access the spatial dimensionality of the frames */
static constexpr unsigned int FrameDimension = FrameType::ImageDimension;
static unsigned int
GetFrameDimension()
{
return FrameType::ImageDimension;
}
itkNewMacro(Self);
/** \see LightObject::GetNameOfClass() */
itkOverrideGetNameOfClassMacro(VideoStream);
/** Safely expand the internal ring buffer. */
void
SetMinimumBufferSize(SizeValueType minimumNumberOfFrames);
/** Initialize any empty frames. This method makes sure that the frame buffer
* is large enough to hold the number of frames needed for the buffered
   * temporal region. It goes through the necessary number of frames, making
   * sure that each one has been initialized. When allocating space for frames,
   * call this method first, then set the spatial regions on each frame, and
   * finally call Allocate. */
void
InitializeEmptyFrames();
/** Provide access to the internal frame buffer object */
BufferType *
GetFrameBuffer()
{
return reinterpret_cast<BufferType *>(m_DataObjectBuffer.GetPointer());
}
const BufferType *
GetFrameBuffer() const
{
    return reinterpret_cast<const BufferType *>(m_DataObjectBuffer.GetPointer());
}
  /** Set the internal frame buffer */
void
SetFrameBuffer(BufferType * buffer);
/** Provide access to the internal caches for the meta data */
const SpatialRegionMapType &
GetLargestPossibleSpatialRegionCache() const
{
return m_LargestPossibleSpatialRegionCache;
}
void
SetLargestPossibleSpatialRegionCache(SpatialRegionMapType map)
{
m_LargestPossibleSpatialRegionCache = map;
}
const SpatialRegionMapType &
GetRequestedSpatialRegionCache() const
{
return m_RequestedSpatialRegionCache;
}
void
SetRequestedSpatialRegionCache(SpatialRegionMapType map)
{
m_RequestedSpatialRegionCache = map;
}
const SpatialRegionMapType &
GetBufferedSpatialRegionCache() const
{
return m_BufferedSpatialRegionCache;
}
void
SetBufferedSpatialRegionCache(SpatialRegionMapType map)
{
m_BufferedSpatialRegionCache = map;
}
const SpacingMapType &
GetSpacingCache() const
{
return m_SpacingCache;
}
void
SetSpacingCache(SpacingMapType map)
{
m_SpacingCache = map;
}
const PointMapType &
GetOriginCache() const
{
return m_OriginCache;
}
void
SetOriginCache(PointMapType map)
{
m_OriginCache = map;
}
const DirectionMapType &
GetDirectionCache() const
{
return m_DirectionCache;
}
void
SetDirectionCache(DirectionMapType map)
{
m_DirectionCache = map;
}
const NumberOfComponentsPerPixelMapType &
GetNumberOfComponentsPerPixelCache() const
{
return m_NumberOfComponentsPerPixelCache;
}
void
  SetNumberOfComponentsPerPixelCache(NumberOfComponentsPerPixelMapType map)
{
m_NumberOfComponentsPerPixelCache = map;
}
/** Set the contents of the frame at a given frame number */
void
SetFrame(SizeValueType frameNumber, FramePointer frame);
  /** Get the frame for the given frame number. Internally, the Head of the
   * ring buffer is always left in place and the frame number is used as an
   * offset. This allows frames to be referenced by an explicit frame number
   * rather than a potentially confusing buffer offset. */
FrameType *
GetFrame(SizeValueType frameNumber);
const FrameType *
GetFrame(SizeValueType frameNumber) const;
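  /* Illustrative usage sketch only (videoStream is assumed to be an existing,
   * initialized VideoStream whose buffered temporal region contains frame 2):
   *
   *   FrameType * frame = videoStream->GetFrame( 2 ); // addressed by frame number
   */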
/** Get/Set the LargestPossibleRegion of a frame */
void
SetFrameLargestPossibleSpatialRegion(SizeValueType frameNumber, SpatialRegionType region);
const SpatialRegionType &
GetFrameLargestPossibleSpatialRegion(SizeValueType frameNumber) const;
/** Get/Set the RequestedRegion of a frame */
void
SetFrameRequestedSpatialRegion(SizeValueType frameNumber, SpatialRegionType region);
const SpatialRegionType &
GetFrameRequestedSpatialRegion(SizeValueType frameNumber) const;
/** Get/Set the BufferedRegion of a frame */
void
SetFrameBufferedSpatialRegion(SizeValueType frameNumber, SpatialRegionType region);
const SpatialRegionType &
GetFrameBufferedSpatialRegion(SizeValueType frameNumber) const;
/** Get/Set the Spacing of a frame */
void
SetFrameSpacing(SizeValueType frameNumber, SpacingType spacing);
const SpacingType &
GetFrameSpacing(SizeValueType frameNumber) const;
/** Get/Set the Origin of a frame */
void
SetFrameOrigin(SizeValueType frameNumber, PointType origin);
const PointType &
GetFrameOrigin(SizeValueType frameNumber) const;
/** Get/Set the Direction of a frame */
void
SetFrameDirection(SizeValueType frameNumber, DirectionType direction);
const DirectionType &
GetFrameDirection(SizeValueType frameNumber) const;
/** Get/Set the NumberOfComponentsPerPixel of a frame */
void
SetFrameNumberOfComponentsPerPixel(SizeValueType frameNumber, unsigned int n);
const NumberOfComponentsPerPixelType &
GetFrameNumberOfComponentsPerPixel(SizeValueType frameNumber) const;
/** Set the LargestPossibleRegion on all frames. This assumes that all frames
* in the buffered temporal region have been initialized (should be called
* after InitializeEmptyFrames). */
void
SetAllLargestPossibleSpatialRegions(SpatialRegionType region);
/** Set the RequestedRegion on all frames. This assumes that all frames in
* the buffered temporal region have been initialized (should be called
* after InitializeEmptyFrames). */
void
SetAllRequestedSpatialRegions(SpatialRegionType region);
/** Set the BufferedRegion on all frames. This assumes that all frames in the
* buffered temporal region have been initialized (should be called after
* InitializeEmptyFrames). */
void
SetAllBufferedSpatialRegions(SpatialRegionType region);
/** Set the Spacing of all frames. This assumes that all frames in the
* buffered temporal region have been initialized (should be called after
* InitializeEmptyFrames). */
void
SetAllFramesSpacing(SpacingType spacing);
/** Set the Origin of all frames. This assumes that all frames in the
* buffered temporal region have been initialized (should be called after
* InitializeEmptyFrames). */
void
SetAllFramesOrigin(PointType origin);
/** Set the Direction of all frames. This assumes that all frames in the
* buffered temporal region have been initialized (should be called after
* InitializeEmptyFrames). */
void
SetAllFramesDirection(DirectionType direction);
/** Set the number of components per pixel of all frames.
* This assumes that all frames in the buffered temporal region have been
* initialized (should be called after InitializeEmptyFrames). */
void
SetAllFramesNumberOfComponentsPerPixel(NumberOfComponentsPerPixelType n);
/** Allocate memory for the buffered spatial region of each frame in the
* buffered temporal region. This assumes that all frames in the buffered
* temporal region have been initialized and that the buffered spatial region
* has been set for each of these frames. A typical setup would look like:
*
\code
// Set the temporal regions
TemporalRegionType temporalRegion;
temporalRegion.SetFrameStart( 0 );
temporalRegion.SetFrameDuration( 3 );
video->SetLargestPossibleTemporalRegion( temporalRegion );
video->SetRequestedTemporalRegion( temporalRegion );
video->SetBufferedTemporalRegion( temporalRegion );
// Initialize all frames in the buffered temporal region
video->InitializeEmptyFrames();
// Set the buffered spatial region for each frame
SpatialRegionType bufferedSpatialRegion;
SpatialRegionType::SizeType size;
SpatialRegionType::IndexType start;
size[0] = 50;
size[1] = 40;
start.Fill( 0 );
bufferedSpatialRegion.SetSize( size );
bufferedSpatialRegion.SetIndex( start );
video->SetAllBufferedSpatialRegions( bufferedSpatialRegion );
// Allocate memory for the frames
video->Allocate();
\endcode
*/
void
Allocate();
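  /* After Allocate() has been called (see the sketch above), each buffered
   * frame can be written to like a regular image. An illustrative continuation
   * of that sketch, assuming a numeric pixel type:
   *
   *   for ( SizeValueType f = 0; f < 3; ++f )
   *   {
   *     video->GetFrame( f )->FillBuffer( 0 );
   *   }
   */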
/** Graft the data and information from one VideoStream to this one. This
   * just copies the meta information using TemporalDataObject's Graft and then
* sets the internal RingBuffer pointer to point to the same buffer used by
* the other VideoStream. */
void
Graft(const DataObject * data) override;
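  /* Illustrative sketch only (the names are hypothetical): inside a video
   * filter's GenerateData(), a temporary stream can be grafted onto the
   * filter's output so that both share the same frame ring buffer:
   *
   *   OutputVideoStreamType::Pointer output = this->GetOutput();
   *   output->Graft( temporaryVideoStream );
   */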
protected:
VideoStream() = default;
~VideoStream() override = default;
void
PrintSelf(std::ostream & os, Indent indent) const override
{
    Superclass::PrintSelf(os, indent);
}
/** These maps are used to cache a mapping between frame number and spatial
* region. This is done because frames will often not be in actual existence
* at the time when the region gets set. */
SpatialRegionMapType m_LargestPossibleSpatialRegionCache{};
SpatialRegionMapType m_RequestedSpatialRegionCache{};
SpatialRegionMapType m_BufferedSpatialRegionCache{};
/** These maps cache a mapping between frame number and the meta data for
* origin, spacing, direction, and number of components per pixel */
SpacingMapType m_SpacingCache{};
DirectionMapType m_DirectionCache{};
PointMapType m_OriginCache{};
NumberOfComponentsPerPixelMapType m_NumberOfComponentsPerPixelCache{};
}; // end class VideoStream
} // end namespace itk
#ifndef ITK_MANUAL_INSTANTIATION
# include "itkVideoStream.hxx"
#endif
#endif