ImageEffectNode.cpp
00001 #include "ImageEffectNode.hpp"
00002 #include "HostDescriptor.hpp"
00003 
00004 // ofx host
00005 #include <tuttle/host/Core.hpp> // for core().getMemoryCache()
00006 #include <tuttle/host/attribute/ClipImage.hpp>
00007 #include <tuttle/host/attribute/allParams.hpp>
00008 #include <tuttle/host/graph/ProcessEdgeAtTime.hpp>
00009 #include <tuttle/host/graph/ProcessVertexData.hpp>
00010 #include <tuttle/host/graph/ProcessVertexAtTimeData.hpp>
00011 
00012 #include <tuttle/host/ofx/OfxhUtilities.hpp>
00013 #include <tuttle/host/ofx/OfxhBinary.hpp>
00014 #include <tuttle/host/ofx/OfxhMemory.hpp>
00015 #include <tuttle/host/ofx/OfxhImageEffectNode.hpp>
00016 #include <tuttle/host/ofx/OfxhPluginAPICache.hpp>
00017 #include <tuttle/host/ofx/OfxhPluginCache.hpp>
00018 #include <tuttle/host/ofx/OfxhHost.hpp>
00019 #include <tuttle/host/ofx/OfxhImageEffectPlugin.hpp>
00020 #include <tuttle/host/ofx/property/OfxhSet.hpp>
00021 #include <tuttle/host/ofx/attribute/OfxhClip.hpp>
00022 #include <tuttle/host/ofx/attribute/OfxhParam.hpp>
00023 
00024 #ifndef TUTTLE_PRODUCTION
00025 // to output all nodes as png for debug
00026 //#define TUTTLE_DEBUG_OUTPUT_ALL_NODES
00027 #endif
00028 
00029 // ofx
00030 #include <ofxCore.h>
00031 #include <ofxImageEffect.h>
00032 
00033 #include <boost/functional/hash.hpp>
00034 #include <boost/foreach.hpp>
00035 #include <boost/lexical_cast.hpp>
00036 
00037 #include <boost/log/trivial.hpp>
00038 
00039 #include <iomanip>
00040 #include <iostream>
00041 #include <fstream>
00042 #include <list>
00043 
00044 namespace tuttle {
00045 namespace host {
00046 
00047 ImageEffectNode::ImageEffectNode( tuttle::host::ofx::imageEffect::OfxhImageEffectPlugin&         plugin,
00048                                   tuttle::host::ofx::imageEffect::OfxhImageEffectNodeDescriptor& desc,
00049                                   const std::string&                                             context )
00050         : tuttle::host::ofx::imageEffect::OfxhImageEffectNode( plugin, desc, context, false )
00051 {
00052         populate();
00053         //      createInstanceAction();
00054 }
00055 
00056 ImageEffectNode::ImageEffectNode( const ImageEffectNode& other )
00057         : INode( other )
00058         , tuttle::host::ofx::imageEffect::OfxhImageEffectNode( other )
00059 {
00060         populate();
00061         copyAttributesValues( other ); // values need to be set before the createInstanceAction!
00062         createInstanceAction();
00063 }
00064 
00065 ImageEffectNode::~ImageEffectNode()
00066 {}
00067 
00068 bool ImageEffectNode::operator==( const INode& other ) const
00069 {
00070         const ImageEffectNode* other_ptr = dynamic_cast<const ImageEffectNode*>( &other );
00071         if( other_ptr == NULL )
00072                 return false;
00073         return operator==( *other_ptr );
00074 }
00075 
00076 /**
00077  * @warning performs a deep comparison
00078  */
00079 bool ImageEffectNode::operator==( const ImageEffectNode& other ) const
00080 {
00081         return ofx::imageEffect::OfxhImageEffectNode::operator==( other );
00082 }
00083 
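/// Connect the output clip of @p sourceEffect to the input attribute @p attr of this node.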
00084 void ImageEffectNode::connect( const INode& sourceEffect, attribute::Attribute& attr )
00085 {
00086         const attribute::ClipImage& outputClip = dynamic_cast<const attribute::ClipImage&>( sourceEffect.getClip( kOfxImageEffectOutputClipName ) );
00087         attribute::ClipImage& inputClip        = dynamic_cast<attribute::ClipImage&>( attr ); // throws std::bad_cast if attr is not a ClipImage attribute
00088 
00089         inputClip.setConnectedClip( outputClip );
00090 }
00091 
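/// Retrieve the node's single input attribute: the only clip if there is exactly one,
/// otherwise the clip named kOfxSimpleSourceAttributeName if present, else the first clip
/// in declaration order. Throws exception::Logic when the node has no clip at all.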
00092 attribute::Attribute& ImageEffectNode::getSingleInputAttribute()
00093 {
00094         ofx::attribute::OfxhClipImageSet::ClipImageVector& clips = getClipsByOrder();
00095         ofx::attribute::OfxhClipImageSet::ClipImageMap& clipsMap = getClipsByName();
00096         ofx::attribute::OfxhAttribute* inAttr                    = NULL;
00097 
00098         if( clips.size() == 1 )
00099         {
00100                 inAttr = &clips[0];
00101         }
00102         else if( clips.size() > 1 )
00103         {
00104                 const ofx::attribute::OfxhClipImageSet::ClipImageMap::iterator it( clipsMap.find( kOfxSimpleSourceAttributeName ) );
00105                 if( it != clipsMap.end() )
00106                 {
00107                         inAttr = it->second;
00108                 }
00109                 else
00110                 {
00111                         inAttr = &clips[0];
00112                 }
00113         }
00114         else // if( clips.empty() )
00115         {
00116                 BOOST_THROW_EXCEPTION( exception::Logic()
00117                         << exception::user( "No source clip." ) );
00118         }
00119         return dynamic_cast<attribute::ClipImage&>( *inAttr );
00120 }
00121 
00122 // get a new clip instance
00123 tuttle::host::ofx::attribute::OfxhClipImage* ImageEffectNode::newClipImage( const tuttle::host::ofx::attribute::OfxhClipImageDescriptor& descriptor )
00124 {
00125         return new attribute::ClipImage( *this, descriptor );
00126 }
00127 
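/// Hash of the node's local state at @p time: the plugin hash, the time itself when the
/// effect is frame varying, and the hash of the parameter set at that time.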
00128 std::size_t ImageEffectNode::getLocalHashAtTime( const OfxTime time ) const
00129 {
00130         std::size_t seed = getPlugin().getHash();
00131 
00132         if( isFrameVarying() )
00133         {
00134                 boost::hash_combine( seed, time );
00135         }
00136 
00137         boost::hash_combine( seed, getParamSet().getHashAtTime(time) );
00138 
00139         return seed;
00140 }
00141 
00142 /// get default output fielding. This is passed into the clip prefs action
00143 /// and might be mapped (if the host allows such a thing)
00144 const std::string& ImageEffectNode::getDefaultOutputFielding() const
00145 {
00146         /// our clip is pretending to be progressive PAL SD, so return kOfxImageFieldNone
00147         static const std::string v( kOfxImageFieldNone );
00148 
00149         return v;
00150 }
00151 
00152 /**
00153  * @return 1 to abort processing, 0 to continue (this host never requests an abort)
00154  */
00155 int ImageEffectNode::abort()
00156 {
00157         return 0;
00158 }
00159 
00160 ofx::OfxhMemory* ImageEffectNode::newMemoryInstance( size_t nBytes )
00161 {
00162         ofx::OfxhMemory* instance = new ofx::OfxhMemory();
00163 
00164         instance->alloc( nBytes );
00165         return instance;
00166 }
00167 
00168 // vmessage
00169 void ImageEffectNode::vmessage( const char* type,
00170                                 const char* id,
00171                                 const char* format,
00172                                 va_list     args ) const OFX_EXCEPTION_SPEC
00173 {
00174         vprintf( format, args );
00175 }
00176 
00177 // get the project size in CANONICAL pixels; defaults to 720x576 when no render data is available
00178 void ImageEffectNode::getProjectSize( double& xSize, double& ySize ) const
00179 {
00180         if( _dataAtTime.empty() )
00181         {
00182                 xSize = 720;
00183                 ySize = 576;
00184         }
00185         else
00186         {
00187                 OfxRectD rod = getLastData()._apiImageEffect._renderRoD;
00188                 xSize = rod.x2 - rod.x1;
00189                 ySize = rod.y2 - rod.y1;
00190                 if (xSize < 1 || ySize < 1)
00191                 {
00192                         xSize = 720;
00193                         ySize = 576;
00194                 }
00195         }
00196 }
00197 
00198 // get the project offset in CANONICAL pixels, we are at 0,0
00199 void ImageEffectNode::getProjectOffset( double& xOffset, double& yOffset ) const
00200 {
00201         xOffset = 0;
00202         yOffset = 0;
00203 }
00204 
00205 // get the project extent in CANONICAL pixels; defaults to 720x576 when no render data is available
00206 void ImageEffectNode::getProjectExtent( double& xSize, double& ySize ) const
00207 {
00208         if( _dataAtTime.empty() )
00209         {
00210                 xSize = 720;
00211                 ySize = 576;
00212         }
00213         else
00214         {
00215                 OfxRectD rod = getLastData()._apiImageEffect._renderRoD;
00216                 xSize = rod.x2 - rod.x1;
00217                 ySize = rod.y2 - rod.y1;
00218                 if (xSize < 1 || ySize < 1)
00219                 {
00220                         xSize = 720;
00221                         ySize = 576;
00222                 }
00223         }
00224 }
00225 
00226 // get the project pixel aspect ratio; this host works with square pixels
00227 double ImageEffectNode::getProjectPixelAspectRatio() const
00228 {
00229         return 1.0;
00230 }
00231 
00232 // effect duration; the host has no fixed project duration, so return a very large value
00233 double ImageEffectNode::getEffectDuration() const
00234 {
00235         return 99999.0;
00236 }
00237 
00238 /// This is called whenever a param is changed by the plugin so that
00239 /// the recursive instanceChangedAction will be fed the correct frame
00240 double ImageEffectNode::getFrameRecursive() const
00241 {
00242         return 0.0;
00243 }
00244 
00245 /// This is called whenever a param is changed by the plugin so that
00246 /// the recursive instanceChangedAction will be fed the correct
00247 /// renderScale
00248 void ImageEffectNode::getRenderScaleRecursive( double& x, double& y ) const
00249 {
00250         x = y = 1.0;
00251 }
00252 
00253 /**
00254  * The pixel components type of the current project
00255  * @todo tuttle: to remove in the future.... size, pixelType, BitDepth, etc... must be locally defined
00256  */
00257 const std::string ImageEffectNode::getProjectPixelComponentsType() const
00258 {
00259         return kOfxImageComponentRGBA;
00260 }
00261 
00262 /**
00263  * The pixel bit depth of the current project (host work in float)
00264  * @todo tuttle: to remove in the future.... size, pixelType, BitDepth, etc... must be locally defined
00265  */
00266 const std::string ImageEffectNode::getProjectBitDepth() const
00267 {
00268         //return kOfxBitDepthByte;
00269         return kOfxBitDepthFloat;
00270 }
00271 
00272 // make a parameter instance
00273 ofx::attribute::OfxhParam* ImageEffectNode::newParam( const ofx::attribute::OfxhParamDescriptor& descriptor ) OFX_EXCEPTION_SPEC
00274 {
00275         const std::string name = descriptor.getName();
00276         ofx::attribute::OfxhParam* param = NULL;
00277 
00278         try
00279         {
00280                 if( descriptor.getParamType() == kOfxParamTypeString )
00281                         param = new attribute::ParamString( *this, name,  descriptor );
00282                 else if( descriptor.getParamType() == kOfxParamTypeInteger )
00283                         param = new attribute::ParamInteger( *this, name,  descriptor );
00284                 else if( descriptor.getParamType() == kOfxParamTypeDouble )
00285                         param = new attribute::ParamDouble( *this, name,  descriptor );
00286                 else if( descriptor.getParamType() == kOfxParamTypeBoolean )
00287                         param = new attribute::ParamBoolean( *this, name,  descriptor );
00288                 else if( descriptor.getParamType() == kOfxParamTypeChoice )
00289                         param = new attribute::ParamChoice( *this, name,  descriptor );
00290                 else if( descriptor.getParamType() == kOfxParamTypeRGBA )
00291                         param = new attribute::ParamRGBA( *this, name,  descriptor );
00292                 else if( descriptor.getParamType() == kOfxParamTypeRGB )
00293                         param = new attribute::ParamRGB( *this, name,  descriptor );
00294                 else if( descriptor.getParamType() == kOfxParamTypeDouble2D )
00295                         param = new attribute::ParamDouble2D( *this, name,  descriptor );
00296                 else if( descriptor.getParamType() == kOfxParamTypeDouble3D )
00297                         param = new attribute::ParamDouble3D( *this, name,  descriptor );
00298                 else if( descriptor.getParamType() == kOfxParamTypeInteger2D )
00299                         param = new attribute::ParamInteger2D( *this, name,  descriptor );
00300                 else if( descriptor.getParamType() == kOfxParamTypeInteger3D )
00301                         param = new attribute::ParamInteger3D( *this, name,  descriptor );
00302                 else if( descriptor.getParamType() == kOfxParamTypePushButton )
00303                         param = new attribute::ParamPushButton( *this, name,  descriptor );
00304                 else if( descriptor.getParamType() == kOfxParamTypeGroup )
00305                         param = new attribute::ParamGroup( *this, name,  descriptor );
00306                 else if( descriptor.getParamType() == kOfxParamTypePage )
00307                         param = new attribute::ParamPage( *this, name,  descriptor );
00308                 else if( descriptor.getParamType() == kOfxParamTypeCustom )
00309                         param = new attribute::ParamCustom( *this, name,  descriptor );
00310                 else
00311                 {
00312                         BOOST_THROW_EXCEPTION( exception::Failed()
00313                             << exception::user() + "Can't create param " + quotes( name ) + " instance from param descriptor, type not recognized." );
00314                 }
00315                 this->addParam( param );
00316         }
00317         catch( exception::Common& e )
00318         {
00319                 BOOST_THROW_EXCEPTION( ofx::OfxhException( *boost::get_error_info<exception::ofxStatus>( e ),
00320                                                            boost::diagnostic_information( e ) ) );
00321         }
00322         catch(... )
00323         {
00324                 BOOST_THROW_EXCEPTION( ofx::OfxhException( kOfxStatErrUnknown,
00325                                                            boost::current_exception_diagnostic_information() ) );
00326         }
00327         return param;
00328 }
00329 
00330 void ImageEffectNode::editBegin( const std::string& name ) OFX_EXCEPTION_SPEC
00331 {
00332         //BOOST_THROW_EXCEPTION( ofx::OfxhException( kOfxStatErrMissingHostFeature ) );
00333 }
00334 
00335 void ImageEffectNode::editEnd() OFX_EXCEPTION_SPEC
00336 {
00337         //BOOST_THROW_EXCEPTION( ofx::OfxhException( kOfxStatErrMissingHostFeature ) );
00338 }
00339 
00340 /// Start doing progress.
00341 void ImageEffectNode::progressStart( const std::string& message )
00342 {
00343         //TUTTLE_LOG_TRACE( message );
00344         if( getContext() != kOfxImageEffectContextReader && getContext() != kOfxImageEffectContextWriter )
00345                 TUTTLE_LOG_INFO( std::left << "       " << common::Color::get()->_green << std::setw( TUTTLE_LOG_PLUGIN_NAME_WIDTH ) << getName() << common::Color::get()->_std );
00346 }
00347 
00348 /// finish the progress
00349 void ImageEffectNode::progressEnd()
00350 {
00351         //std::cout << std::endl;
00352 }
00353 
00354 /// set the progress to some level of completion,
00355 /// returns true if you should abandon processing, false to continue
00356 bool ImageEffectNode::progressUpdate( const double progress )
00357 {
00358         /*
00359         if( ( getContext() == kOfxImageEffectContextReader ) || ( getContext() == kOfxImageEffectContextWriter ) )
00360                 TUTTLE_LOG_INFO( "\r" << common::Color::get()->_std << "[" << std::right << std::setw(3) << int(progress * 100) << "%] " << " " << std::left << std::flush );
00361         else
00362                 TUTTLE_LOG_INFO( "\r" << common::Color::get()->_std << "[" << std::right << std::setw(3) << int(progress * 100) << "%] " << std::left << common::Color::get()->_green << getName() << common::Color::get()->_std << std::flush );
00363                 */
00364         return false;
00365 }
00366 
00367 /// get the current time on the timeline. This is not necessarily the same
00368 /// time as being passed to an action (eg render)
00369 double ImageEffectNode::timelineGetTime()
00370 {
00371         return 0;
00372 }
00373 
00374 /// set the timeline to a specific time
00375 void ImageEffectNode::timelineGotoTime( double t )
00376 {}
00377 
00378 /// get the first and last times available on the effect's timeline
00379 void ImageEffectNode::timelineGetBounds( double& t1, double& t2 )
00380 {
00381         t1 = 0;
00382         t2 = 99999;
00383 }
00384 
00385 /// override to get frame range of the effect
00386 void ImageEffectNode::beginSequenceRenderAction( OfxTime   startFrame,
00387                                          OfxTime   endFrame,
00388                                          OfxTime   step,
00389                                          bool      interactive,
00390                                          OfxPointD renderScale ) OFX_EXCEPTION_SPEC
00391 {
00392         OfxhImageEffectNode::beginSequenceRenderAction( startFrame, endFrame, step, interactive, renderScale );
00393 }
00394 
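/// Check that every non-optional input clip is connected; throws exception::Logic otherwise.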
00395 void ImageEffectNode::checkClipsConnections() const
00396 {
00397         for( ClipImageMap::const_iterator it = _clipImages.begin();
00398              it != _clipImages.end();
00399              ++it )
00400         {
00401                 const attribute::ClipImage& clip = dynamic_cast<const attribute::ClipImage&>( *( it->second ) );
00402                 if( !clip.isOutput() && !clip.isConnected() && !clip.isOptional() ) // a non-optional input clip is unconnected
00403                 {
00404                         BOOST_THROW_EXCEPTION( exception::Logic()
00405                             << exception::user( "A non-optional clip is unconnected! (" + clip.getFullName() + ")" ) );
00406                 }
00407         }
00408 }
00409 
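/// Propagate pixel components through the node: determine the most chromatic components among
/// the connected inputs, update each input clip from its connected clip, and set the output
/// clip to the most chromatic components when the plugin supports them.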
00410 void ImageEffectNode::initComponents()
00411 {
00412         attribute::ClipImage& outputClip    = dynamic_cast<attribute::ClipImage&>( getOutputClip() );
00413         //bool inputClipsFound                = false;
00414         std::string mostChromaticComponents = kOfxImageComponentNone;
00415 
00416         for( ClipImageMap::iterator it = _clipImages.begin();
00417              it != _clipImages.end();
00418              ++it )
00419         {
00420                 attribute::ClipImage& clip = dynamic_cast<attribute::ClipImage&>( *( it->second ) );
00421                 if( !clip.isOutput() && clip.isConnected() )
00422                 {
00423                         //inputClipsFound = true;
00424                         const attribute::ClipImage& linkClip = clip.getConnectedClip();
00425                         mostChromaticComponents = findMostChromaticComponents( linkClip.getComponentsString(), mostChromaticComponents );
00426                 }
00427         }
00428         // components
00429         for( ClipImageMap::iterator it = _clipImages.begin();
00430              it != _clipImages.end();
00431              ++it )
00432         {
00433                 attribute::ClipImage& clip = dynamic_cast<attribute::ClipImage&>( *( it->second ) );
00434                 if( !clip.isOutput() && clip.isConnected() )
00435                 {
00436                         const attribute::ClipImage& linkClip = clip.getConnectedClip();
00437                         if( clip.isSupportedComponent( mostChromaticComponents ) )
00438                                 clip.setComponentsStringIfNotModifiedByPlugin( linkClip.getComponentsString() );
00439                 }
00440         }
00441         if( outputClip.isSupportedComponent( mostChromaticComponents ) )
00442                 outputClip.setComponentsStringIfNotModifiedByPlugin( mostChromaticComponents );
00443 }
00444 
00445 /// @todo multiple PAR
00446 void ImageEffectNode::initInputClipsPixelAspectRatio()
00447 {
00448         std::set<double> inputPARs;
00449 //      if( supportsMultipleClipPARs() )
00450         {
00451                 for( ClipImageMap::iterator it = _clipImages.begin();
00452                          it != _clipImages.end();
00453                          ++it )
00454                 {
00455                         attribute::ClipImage& clip = dynamic_cast<attribute::ClipImage&>( *( it->second ) );
00456                         TUTTLE_TLOG( TUTTLE_INFO, "[Clip] " << clip.getName() );
00457                         if( !clip.isOutput() && clip.isConnected() )
00458                         {
00459                                 const attribute::ClipImage& linkClip = clip.getConnectedClip();
00460                                 const double par = linkClip.getPixelAspectRatio();
00461                                 TUTTLE_TLOG( TUTTLE_INFO, "[Clip] " << linkClip.getName() << ", pixel aspect ratio = " << par );
00462                                 clip.setPixelAspectRatio( par, ofx::property::eModifiedByHost );
00463                                 inputPARs.insert( par );
00464                         }
00465                 }
00466         }
00467 //      else
00468 //      {
00469 //              // @todo The plugin doesn't support PAR, the host should do the conversions!
00470 //              // http://openfx.sourceforge.net/Documentation/1.3/ofxProgrammingReference.html#ImageEffectsPixelAspectRatios
00471 //              // If a plugin does not accept clips of differing PARs, then the host must resample all images fed to that effect to agree with the output's PAR.
00472 //              // If a plugin does accept clips of differing PARs, it will need to specify the output clip's PAR in the kOfxImageEffectActionGetClipPreferences action.
00473 //              
00474 //              // Convert images here ? Or introduce convert nodes into the ProcessGraph?
00475 //              BOOST_ASSERT(false);
00476 //      }
00477         
00478         // Not supported yet. So fail in debug,
00479         // and process with a wrong pixel aspect ratio in release.
00480         TUTTLE_TLOG( TUTTLE_INFO, "[Clip] support Multiple clip PAR = " << supportsMultipleClipPARs() );
00481         TUTTLE_TLOG( TUTTLE_INFO, "[Clip] number of clips = " << getNbClips() );
00482         BOOST_ASSERT( inputPARs.size() <= 1 || supportsMultipleClipPARs() || getNbClips() <= 2 );
00483 }
00484 
00485 void ImageEffectNode::initInputClipsFps()
00486 {
00487         for( ClipImageMap::iterator it = _clipImages.begin();
00488              it != _clipImages.end();
00489              ++it )
00490         {
00491                 attribute::ClipImage& clip = dynamic_cast<attribute::ClipImage&>( *( it->second ) );
00492                 if( !clip.isOutput() && clip.isConnected() )
00493                 {
00494                         const attribute::ClipImage& linkClip = clip.getConnectedClip();
00495                         clip.setFrameRate( linkClip.getFrameRate() );
00496                 }
00497         }
00498 }
00499 
00500 void ImageEffectNode::initFps()
00501 {
00502         attribute::ClipImage& outputClip = dynamic_cast<attribute::ClipImage&>( getOutputClip() );
00503         outputClip.setFrameRate( getOutputFrameRate() );
00504 }
00505 
00506 void ImageEffectNode::initPixelAspectRatio()
00507 {
00508         attribute::ClipImage& outputClip = dynamic_cast<attribute::ClipImage&>( getOutputClip() );
00509         outputClip.setPixelAspectRatio( getOutputPixelAspectRatio(), ofx::property::eModifiedByHost );
00510 }
00511 
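/// Forward pass of the bit depth propagation (from inputs to output): find the deepest bit
/// depth among the connected inputs, reduce it to the best depth supported by this plugin,
/// and apply it to the input clips and to the output clip.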
00512 void ImageEffectNode::maximizeBitDepthFromReadsToWrites()
00513 {
00514         std::string biggestBitDepth      = kOfxBitDepthNone;
00515         attribute::ClipImage& outputClip = dynamic_cast<attribute::ClipImage&>( getOutputClip() );
00516         bool inputClipsFound             = false;
00517 
00518         // init variables
00519         for( ClipImageMap::iterator it = _clipImages.begin();
00520              it != _clipImages.end();
00521              ++it )
00522         {
00523                 attribute::ClipImage& clip = dynamic_cast<attribute::ClipImage&>( *( it->second ) );
00524                 if( !clip.isOutput() && clip.isConnected() ) // filter for clip.isMask() and clip.isOptional() ?
00525                 {
00526                         inputClipsFound = true;
00527                         const attribute::ClipImage& linkClip = clip.getConnectedClip();
00528                         biggestBitDepth = ofx::imageEffect::findDeepestBitDepth( linkClip.getBitDepthString(), biggestBitDepth );
00529                 }
00530         }
00531         const std::string validBitDepth = this->bestSupportedBitDepth( biggestBitDepth );
00532 
00533         // bit depth
00534         if( supportsMultipleClipDepths() )
00535         {
00536                 // check if we support the bit depth of each input
00537                 // and fill input clip with connected clips bit depth
00538                 for( ClipImageMap::iterator it = _clipImages.begin();
00539                      it != _clipImages.end();
00540                      ++it )
00541                 {
00542                         attribute::ClipImage& clip = dynamic_cast<attribute::ClipImage&>( *( it->second ) );
00543                         if( !clip.isOutput() && clip.isConnected() )
00544                         {
00545                                 const attribute::ClipImage& linkClip = clip.getConnectedClip();
00546                                 const std::string& linkClipBitDepth  = linkClip.getBitDepthString();
00547                                 if( this->isSupportedBitDepth( linkClipBitDepth ) )
00548                                 {
00549                                         clip.setBitDepthStringIfUpperAndNotModifiedByPlugin( linkClipBitDepth );
00550                                 }
00551                         }
00552                 }
00553         }
00554         else // multiple clip depth not supported (standard case)
00555         {
00556                 if( inputClipsFound && // if we have an input clip
00557                     validBitDepth == kOfxBitDepthNone ) // if we didn't find a valid bit depth value
00558                 {
00559                         BOOST_THROW_EXCEPTION( exception::Logic()
00560                             << exception::user( "Pixel depth " + biggestBitDepth + " not supported by plugin: " + getName() ) );
00561                 }
00562                 for( ClipImageMap::iterator it = _clipImages.begin();
00563                      it != _clipImages.end();
00564                      ++it )
00565                 {
00566                         attribute::ClipImage& clip = dynamic_cast<attribute::ClipImage&>( *( it->second ) );
00567                         if( !clip.isOutput() && clip.isConnected() )
00568                         {
00569                                 const attribute::ClipImage& linkClip = clip.getConnectedClip();
00570                                 if( ( linkClip.getNode().getNodeType() == INode::eNodeTypeImageEffect &&
00571                                       linkClip.getNode().asImageEffectNode().isSupportedBitDepth( validBitDepth )
00572                                     ) ||
00573                                       linkClip.getNode().getNodeType() == INode::eNodeTypeBuffer
00574                                   )
00575                                 {
00576                                         clip.setBitDepthStringIfUpperAndNotModifiedByPlugin( validBitDepth );
00577                                 }
00578                         }
00579                 }
00580         }
00581         outputClip.setBitDepthStringIfUpperAndNotModifiedByPlugin( validBitDepth );
00582 }
00583 
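/// Backward pass of the bit depth propagation (from output back to inputs): when the plugin
/// does not support multiple clip depths, push the output clip's bit depth onto the connected
/// upstream clips that can handle it.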
00584 void ImageEffectNode::maximizeBitDepthFromWritesToReads()
00585 {
00586         //TUTTLE_TLOG( TUTTLE_INFO, "maximizeBitDepthFromWritesToReads: " << getName() );
00587         if( !supportsMultipleClipDepths() )
00588         {
00589                 attribute::ClipImage& outputClip         = dynamic_cast<attribute::ClipImage&>( getOutputClip() );
00590                 const std::string& outputClipBitDepthStr = outputClip.getBitDepthString();
00591                 for( ClipImageMap::iterator it = _clipImages.begin();
00592                      it != _clipImages.end();
00593                      ++it )
00594                 {
00595                         attribute::ClipImage& clip = dynamic_cast<attribute::ClipImage&>( *( it->second ) );
00596                         if( !clip.isOutput() && clip.isConnected() )
00597                         {
00598                                 /// @todo tuttle: what is the best way to access another node?
00599                                 /// through the graph? through a graph inside ProcessOptions?
00600                                 /*const */ attribute::ClipImage& linkClip = clip.getConnectedClip();
00601 
00602                                 //TUTTLE_TLOG( TUTTLE_INFO, clip.getFullName() << "(" << clip.getBitDepth() << ")" << "-->" << linkClip.getFullName() << "(" << linkClip.getBitDepth() << ")" );
00603                                 if( linkClip.getNode().getNodeType() == INode::eNodeTypeImageEffect &&
00604                                     linkClip.getNode().asImageEffectNode().isSupportedBitDepth( outputClipBitDepthStr ) ) // need to be supported by the other node
00605                                 {
00606                                         if( linkClip.getNode().asImageEffectNode().supportsMultipleClipDepths() ) /// @todo tuttle: is this test correct in all cases?
00607                                         {
00608                                                 linkClip.setBitDepthStringIfUpper( outputClipBitDepthStr );
00609                                         }
00610                                         else
00611                                         {
00612                                                 linkClip.setBitDepthStringIfUpperAndNotModifiedByPlugin( outputClipBitDepthStr );
00613                                         }
00614                                 }
00615                                 //TUTTLE_TLOG( TUTTLE_INFO, clip.getFullName() << "(" << clip.getBitDepth() << ")" << "-->" << linkClip.getFullName() << "(" << linkClip.getBitDepth() << ")" );
00616                         }
00617                         //else
00618                         //{
00619                         //      TUTTLE_TLOG( TUTTLE_INFO, clip.getFullName() << "(" << clip.getBitDepth() << ")" << ", unconnected ? " << clip.isConnected() << ", output ? " << clip.isOutput() );
00620                         //}
00621                 }
00622         }
00623 }
00624 
00625 void ImageEffectNode::coutBitDepthConnections() const
00626 {
00627 #ifndef TUTTLE_PRODUCTION
00628         // validation
00629         for( ClipImageMap::const_iterator it = _clipImages.begin();
00630              it != _clipImages.end();
00631              ++it )
00632         {
00633                 const attribute::ClipImage& clip = dynamic_cast<attribute::ClipImage&>( *( it->second ) );
00634 
00635                 //const ofx::property::String& propPixelDepth       = clip.getProperties().fetchStringProperty( kOfxImageEffectPropPixelDepth );
00636                 //const ofx::property::String& propComponent        = clip.getProperties().fetchStringProperty( kOfxImageEffectPropComponents );
00637                 //const ofx::property::Double& propPixelAspectRatio = clip.getProperties().fetchDoubleProperty( kOfxImagePropPixelAspectRatio );
00638                 /*
00639                 TUTTLE_TLOG( TUTTLE_INFO, "-- " << "clip: " << " = " << clip.getFullName() );
00640                 TUTTLE_TLOG( TUTTLE_INFO, "-- " << kOfxImageEffectPropPixelDepth << " = " << propPixelDepth.getValue()
00641                                                            << " : " << ( propPixelDepth.getModifiedBy() == ofx::property::eModifiedByPlugin ? "(plugin)" : "(host)" ) );
00642                 TUTTLE_TLOG( TUTTLE_INFO, "-- " << kOfxImageEffectPropComponents << " = " << propComponent.getValue()
00643                                                            << " : " << ( propComponent.getModifiedBy() == ofx::property::eModifiedByPlugin ? "(plugin)" : "(host)" ) );
00644                 TUTTLE_TLOG( TUTTLE_INFO, "-- " << kOfxImagePropPixelAspectRatio << " = " << propPixelAspectRatio.getValue()
00645                                                            << " : " << ( propPixelAspectRatio.getModifiedBy() == ofx::property::eModifiedByPlugin ? "(plugin)" : "(host)" ) );
00646                 */
00647                 if( !clip.isOutput() && clip.isConnected() )
00648                 {
00649                         const attribute::ClipImage& linkClip = clip.getConnectedClip();
00650                         TUTTLE_TLOG( TUTTLE_INFO, "[Bit Depth Connection] Connection between " << clip.getFullName() << " (" << clip.getBitDepth() << " bytes)" << " => " << linkClip.getFullName() << " (" << linkClip.getBitDepth() << " bytes)." );
00651                 }
00652         }
00653 #endif
00654 }
00655 
00656 void ImageEffectNode::validBitDepthConnections() const
00657 {
00658         // validation
00659         for( ClipImageMap::const_iterator it = _clipImages.begin();
00660              it != _clipImages.end();
00661              ++it )
00662         {
00663                 const attribute::ClipImage& clip = dynamic_cast<attribute::ClipImage&>( *( it->second ) );
00664                 if( !clip.isOutput() && clip.isConnected() )
00665                 {
00666                         const attribute::ClipImage& linkClip = clip.getConnectedClip();
00667                         if( clip.getBitDepth() != linkClip.getBitDepth() )
00668                         {
00669                                 BOOST_THROW_EXCEPTION( exception::Logic()
00670                                         << exception::dev() + "Error in graph bit depth propagation."
00671                                                               "Connection between " + clip.getFullName() + " (" + clip.getBitDepth() + " bytes)" + " => " + linkClip.getFullName() + " (" + linkClip.getBitDepth() + " bytes)."
00672                                         << exception::pluginName( getName() )
00673                                         << exception::pluginIdentifier( getPlugin().getIdentifier() ) );
00674                         }
00675                 }
00676         }
00677 }
00678 
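/// Default time domain of the node: the union of the connected input clips' time domains,
/// or an infinite range when no input clip is connected.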
00679 OfxRangeD ImageEffectNode::getDefaultTimeDomain() const
00680 {
00681         //TUTTLE_TLOG( TUTTLE_INFO, "- ImageEffectNode::getDefaultTimeDomain: " << getName() );
00682         OfxRangeD range;
00683         range.min = kOfxFlagInfiniteMin;
00684         range.max = kOfxFlagInfiniteMax;
00685         // compute the union of the connected input clips' time domains; stays infinite without inputs
00686         bool first = true;
00687         for( ClipImageMap::const_iterator it = _clipImages.begin();
00688                 it != _clipImages.end();
00689                 ++it )
00690         {
00691                 const attribute::ClipImage& clip = dynamic_cast<attribute::ClipImage&>( *( it->second ) );
00692                 if( !clip.isOutput() && clip.isConnected() )
00693                 {
00694                         const attribute::ClipImage& linkClip = clip.getConnectedClip();
00695                         const OfxRangeD clipRange = linkClip.getNode().getTimeDomain();
00696                         if( first )
00697                         {
00698                                 first = false;
00699                                 range = clipRange;
00700                         }
00701                         else
00702                         {
00703                                 // maybe better to use intersection instead of union
00704                                 range.min = std::min( range.min, clipRange.min );
00705                                 range.max = std::max( range.max, clipRange.max );
00706                         }
00707                 }
00708         }
00709         return range;
00710 }
00711 
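/// Compute the node's time domain: copy the connected clips' frame ranges onto the input
/// clips, ask the plugin via the getTimeDomain action, and fall back to the default time
/// domain otherwise. The result is stored on the output clip.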
00712 OfxRangeD ImageEffectNode::computeTimeDomain()
00713 {
00714         // Copy each connected clip's frame range into the corresponding input clip
00715         for( ClipImageMap::iterator it = _clipImages.begin();
00716                 it != _clipImages.end();
00717                 ++it )
00718         {
00719                 attribute::ClipImage& clip = dynamic_cast<attribute::ClipImage&>( *( it->second ) );
00720                 if( !clip.isOutput() && clip.isConnected() )
00721                 {
00722                         const attribute::ClipImage& linkClip = clip.getConnectedClip();
00723                         const OfxRangeD clipRange = linkClip.getFrameRange();
00724                         clip.setFrameRange( clipRange.min, clipRange.max );
00725                 }
00726         }
00727         TUTTLE_TLOG( TUTTLE_INFO, "[Time domain] getTimeDomain " << quotes(getName()) << " computed by the host." );
00728         OfxRangeD defaultRange = getDefaultTimeDomain();
00729         OfxRangeD range = defaultRange;
00730 
00731         // ask to the plugin
00732         if( getTimeDomainAction( range ) )
00733         {
00734                 TUTTLE_TLOG( TUTTLE_INFO, "[Time domain] getTimeDomain " << quotes(getName()) << " computed by the plugin." );
00735         }
00736         else
00737         {
00738                 range = defaultRange;
00739         }
00740         dynamic_cast<attribute::ClipImage*>(_clipImages[kOfxImageEffectOutputClipName])->setFrameRange( range.min, range.max );
00741         dynamic_cast<attribute::ClipImage*>(_clipImages[kOfxImageEffectOutputClipName])->setUnmappedFrameRange( range.min, range.max );
00742         return range;
00743 }
00744 
00745 
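/// First setup pass: check clip connections, propagate fps and pixel aspect ratio from the
/// inputs, run the clip preferences action, then initialize components, output pixel aspect
/// ratio and fps, and run the forward bit depth propagation.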
00746 void ImageEffectNode::setup1()
00747 {
00748         checkClipsConnections();
00749         
00750         initInputClipsFps();
00751         initInputClipsPixelAspectRatio();
00752 
00753         getClipPreferencesAction();
00754         
00755         initComponents();
00756         initPixelAspectRatio();
00757         initFps();
00758         
00759         maximizeBitDepthFromReadsToWrites();
00760 }
00761 
00762 void ImageEffectNode::setup2_reverse()
00763 {
00764         maximizeBitDepthFromWritesToReads();
00765 }
00766 
00767 void ImageEffectNode::setup3()
00768 {
00769         maximizeBitDepthFromReadsToWrites();
00770         //coutBitDepthConnections();
00771         validBitDepthConnections();
00772 }
00773 
00774 void ImageEffectNode::beginSequence( graph::ProcessVertexData& vData )
00775 {
00776         //TUTTLE_TLOG( TUTTLE_INFO, "begin: " << getName() );
00777         beginSequenceRenderAction(
00778                         vData._renderTimeRange.min,
00779                         vData._renderTimeRange.max,
00780                         vData._step,
00781                         vData._interactive,
00782                         vData._renderScale
00783                 );
00784 }
00785 
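/// First pre-process pass: compute the region of definition at this time and store it as
/// both the render RoD and the render RoI (tiles are not supported yet).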
00786 void ImageEffectNode::preProcess1( graph::ProcessVertexAtTimeData& vData )
00787 {
00788         TUTTLE_TLOG( TUTTLE_INFO, "[Pre Process 1] " << getName() << " at time: " << vData._time );
00789         //setCurrentTime( vData._time );
00790 
00791         OfxRectD rod;
00792         getRegionOfDefinitionAction(
00793                         vData._time,
00794                         vData._nodeData->_renderScale,
00795                         rod );
00796 //      TUTTLE_TLOG_VAR3( TUTTLE_INFO, this->getName(), vData._time, rod );
00797 //      TUTTLE_TLOG_VAR( TUTTLE_INFO, &getData(vData._time) );
00798 //      TUTTLE_TLOG_VAR( TUTTLE_INFO, &vData );
00799         vData._apiImageEffect._renderRoD = rod;
00800         vData._apiImageEffect._renderRoI = rod; ///< @todo tuttle: tile support
00801 
00802         TUTTLE_TLOG( TUTTLE_INFO, "[Pre Process 1] rod: x1:" << rod.x1 << " y1:" << rod.y1 << " x2:" << rod.x2 << " y2:" << rod.y2 );
00803 }
00804 
00805 void ImageEffectNode::preProcess2_reverse( graph::ProcessVertexAtTimeData& vData )
00806 {
00807 //      TUTTLE_TLOG( TUTTLE_INFO, "preProcess2_finish: " << getName() << " at time: " << vData._time );
00808 
00809         getRegionOfInterestAction( vData._time,
00810                                    vData._nodeData->_renderScale,
00811                                    vData._apiImageEffect._renderRoI,
00812                                    vData._apiImageEffect._inputsRoI );
00813 //      TUTTLE_TLOG_VAR( TUTTLE_INFO, vData._renderRoD );
00814 //      TUTTLE_TLOG_VAR( TUTTLE_INFO, vData._renderRoI );
00815 }
00816 
00817 
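/// Ask the plugin whether this render is an identity: build the pixel render window from the
/// render RoI (dividing x coordinates by the pixel aspect ratio) and call the isIdentity action.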
00818 bool ImageEffectNode::isIdentity( const graph::ProcessVertexAtTimeData& vData, std::string& clip, OfxTime& time ) const
00819 {
00820         time = vData._time;
00821         double par = this->getOutputClip().getPixelAspectRatio();
00822         if( par == 0.0 )
00823                 par = 1.0;
00824         OfxRectI renderWindow;
00825         renderWindow.x1 = boost::numeric_cast<int>( std::floor( vData._apiImageEffect._renderRoI.x1 / par ) );
00826         renderWindow.x2 = boost::numeric_cast<int>( std::ceil( vData._apiImageEffect._renderRoI.x2 / par ) );
00827         renderWindow.y1 = boost::numeric_cast<int>( std::floor( vData._apiImageEffect._renderRoI.y1 ) );
00828         renderWindow.y2 = boost::numeric_cast<int>( std::ceil( vData._apiImageEffect._renderRoI.y2 ) );
00829         return isIdentityAction( time, vData._apiImageEffect._field, renderWindow, vData._nodeData->_renderScale, clip );
00830 }
00831 
00832 
00833 void ImageEffectNode::preProcess_infos( const graph::ProcessVertexAtTimeData& vData, const OfxTime time, graph::ProcessVertexAtTimeInfo& nodeInfos ) const
00834 {
00835 //      TUTTLE_TLOG( TUTTLE_INFO, "preProcess_infos: " << getName() );
00836         const OfxRectD rod             = vData._apiImageEffect._renderRoD;
00837         const std::size_t bitDepth     = this->getOutputClip().getBitDepth(); // value in bytes
00838         const std::size_t nbComponents = getOutputClip().getNbComponents();
00839         nodeInfos._memory = std::ceil( ( rod.x2 - rod.x1 ) * ( rod.y2 - rod.y1 ) * nbComponents * bitDepth );
00840 }
00841 
00842 
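/// Render the node at the time stored in @p vData: fetch the needed input images from the
/// memory cache, allocate and register the output image, call the plugin render action, then
/// release the input references and declare the future usages of the output.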
00843 void ImageEffectNode::process( graph::ProcessVertexAtTimeData& vData )
00844 {
00845 //      TUTTLE_TLOG( TUTTLE_INFO, "process: " << getName() );
00846         memory::IMemoryCache& memoryCache( core().getMemoryCache() );
00847         // keep a reference to all needed data for the duration of the process function
00848         std::list<memory::CACHE_ELEMENT> allNeededDatas;
00849 
00850         double par = this->getOutputClip().getPixelAspectRatio();
00851         if( par == 0.0 )
00852                 par = 1.0;
00853         const OfxRectI renderWindow = {
00854                 boost::numeric_cast<int>( std::floor( vData._apiImageEffect._renderRoI.x1 / par ) ),
00855                 boost::numeric_cast<int>( std::floor( vData._apiImageEffect._renderRoI.y1 ) ),
00856                 boost::numeric_cast<int>( std::ceil( vData._apiImageEffect._renderRoI.x2 / par ) ),
00857                 boost::numeric_cast<int>( std::ceil( vData._apiImageEffect._renderRoI.y2 ) )
00858         };
00859 //      TUTTLE_TLOG_VAR( TUTTLE_INFO, roi );
00860 
00861 //      INode::ClipTimesSetMap timesSetMap = this->getFramesNeeded( vData._time );
00862         
00863         // acquire needed clip images
00864         /*
00865         TUTTLE_TLOG( TUTTLE_INFO, "acquire needed input clip images" );
00866         TUTTLE_TLOG_VAR( TUTTLE_INFO, vData._inEdges.size() );
00867         TUTTLE_TLOG_VAR( TUTTLE_INFO, vData._outEdges.size() );
00868         BOOST_FOREACH( const graph::ProcessEdgeAtTime* o, vData._outEdges )
00869         {
00870                 TUTTLE_TLOG_VAR( TUTTLE_INFO, o );
00871                 TUTTLE_TLOG_VAR( TUTTLE_INFO, o->getInTime() );
00872                 TUTTLE_TLOG_VAR( TUTTLE_INFO, o->getInAttrName() );
00873         }
00874         BOOST_FOREACH( const graph::ProcessEdgeAtTime* i, vData._inEdges )
00875         {
00876                 TUTTLE_TLOG_VAR( TUTTLE_INFO, i );
00877                 TUTTLE_TLOG_VAR( TUTTLE_INFO, i->getInTime() );
00878                 TUTTLE_TLOG_VAR( TUTTLE_INFO, i->getInAttrName() );
00879         }
00880         */
00881         TUTTLE_TLOG( TUTTLE_INFO, "[Node Process] Acquire needed input clip images" );
00882         BOOST_FOREACH( const graph::ProcessVertexAtTimeData::ProcessEdgeAtTimeByClipName::value_type& inEdgePair, vData._inEdges )
00883         {
00884                 const graph::ProcessEdgeAtTime* inEdge = inEdgePair.second;
00885                 //TUTTLE_TLOG_VAR( TUTTLE_INFO, i );
00886                 //TUTTLE_TLOG_VAR( TUTTLE_INFO, i->getInTime() );
00887                 //TUTTLE_TLOG_VAR( TUTTLE_INFO, i->getInAttrName() );
00888                 attribute::ClipImage& clip = getClip( inEdge->getInAttrName() );
00889                 const OfxTime outTime = inEdge->getOutTime();
00890                 
00891                 TUTTLE_TLOG( TUTTLE_INFO, "[Node Process] out: " << inEdge->getOut() << " -> in " << inEdge->getIn() );
00892                 memory::CACHE_ELEMENT imageCache( memoryCache.get( clip.getClipIdentifier(), outTime ) );
00893                 if( imageCache.get() == NULL )
00894                 {
00895                         BOOST_THROW_EXCEPTION( exception::Memory()
00896                                 << exception::dev() + "Input attribute " + quotes( clip.getFullName() ) + " at time " + vData._time + " not in memory cache (identifier:" + quotes( clip.getClipIdentifier() ) + ")." );
00897                 }
00898                 allNeededDatas.push_back( imageCache );
00899         }
00900         
00901         TUTTLE_TLOG( TUTTLE_INFO, "[Node Process] Acquire needed output clip images" );
00902         BOOST_FOREACH( ClipImageMap::value_type& i, _clipImages )
00903         {
00904                 attribute::ClipImage& clip = dynamic_cast<attribute::ClipImage&>( *( i.second ) );
00905                 if( clip.isOutput() )
00906                 {
00907                         TUTTLE_TLOG( TUTTLE_INFO, "[Node Process] " << vData._apiImageEffect._renderRoI );
00908                         memory::CACHE_ELEMENT imageCache( new attribute::Image(
00909                                         clip,
00910                                         vData._time,
00911                                         vData._apiImageEffect._renderRoI,
00912                                         attribute::Image::eImageOrientationFromBottomToTop,
00913                                         0 )
00914                                 );
00915                         imageCache->setPoolData( core().getMemoryPool().allocate( imageCache->getMemorySize() ) );
00916                         memoryCache.put( clip.getClipIdentifier(), vData._time, imageCache );
00917                         
00918                         allNeededDatas.push_back( imageCache );
00919                 }
00920 //              else
00921 //              {
00922 //                      if( ! clip.isConnected() )
00923 //                              continue;
00924 //                      
00925 //                      // for each framesNeeded
00926 //                      const INode::TimesSet& timesSet = timesSetMap[clip.getName()]; /// @todo tuttle: for each edge use edge._outTime
00927 //                      if( timesSet.size() == 0 )
00928 //                              continue; // the plugin don't use this input (is it allowed by the standard?)
00929 //                      BOOST_FOREACH( const INode::TimesSet::value_type& inTime, timesSet )
00930 //                      {
00931 //                              memory::CACHE_ELEMENT imageCache( memoryCache.get( clip.getClipIdentifier(), inTime ) );
00932 //                              if( imageCache.get() == NULL )
00933 //                              {
00934 //                                      BOOST_THROW_EXCEPTION( exception::Memory()
00935 //                                              << exception::dev() + "Input attribute " + quotes( clip.getFullName() ) + " at time " + vData._time + " not in memory cache (identifier:" + quotes( clip.getClipIdentifier() ) + ")." );
00936 //                              }
00937 //                              allNeededDatas.push_back( imageCache );
00938 //                      }
00939 //              }
00940         }
00941 
00942         TUTTLE_TLOG( TUTTLE_INFO, "[Node Process] Plugin Render Action" );
00943 
00944         renderAction( vData._time,
00945                                   vData._apiImageEffect._field,
00946                                   renderWindow,
00947                                   vData._nodeData->_renderScale );
00948         
00949         debugOutputImage( vData._time );
00950 
00951         // release input images
00952         BOOST_FOREACH( const graph::ProcessVertexAtTimeData::ProcessEdgeAtTimeByClipName::value_type& inEdgePair, vData._inEdges )
00953         {
00954                 const graph::ProcessEdgeAtTime* inEdge = inEdgePair.second;
00955                 attribute::ClipImage& clip = getClip( inEdge->getInAttrName() );
00956                 const OfxTime outTime = inEdge->getOutTime();
00957                 /*
00958                 TUTTLE_TLOG_VAR2( TUTTLE_INFO, clip.getClipIdentifier(), outTime );
00959                 TUTTLE_TLOG_VAR2( TUTTLE_INFO, inEdge->getOut(), inEdge->getIn() );
00960                 TUTTLE_TLOG_VAR2( TUTTLE_INFO, clip.getIdentifier(), clip.getFullName() );
00961                 */
00962                 memory::CACHE_ELEMENT imageCache = memoryCache.get( clip.getClipIdentifier(), outTime );
00963                 if( imageCache.get() == NULL )
00964                 {
00965                         BOOST_THROW_EXCEPTION( exception::Memory()
00966                                 << exception::dev() + "Clip " + quotes( clip.getFullName() ) + " not in memory cache (identifier: " + quotes( clip.getClipIdentifier() ) + ", time: " + outTime + ")." );
00967                 }
00968                 imageCache->releaseReference( ofx::imageEffect::OfxhImage::eReferenceOwnerHost );
00969         }
00970         
00971         // declare future usages of the output
00972         BOOST_FOREACH( ClipImageMap::value_type& item, _clipImages )
00973         {
00974                 attribute::ClipImage& clip = dynamic_cast<attribute::ClipImage&>( *( item.second ) );
00975                 if( ! clip.isOutput() && ! clip.isConnected() )
00976                         continue;
00977                 
00978                 if( clip.isOutput() )
00979                 {
00980                         memory::CACHE_ELEMENT imageCache = memoryCache.get( clip.getClipIdentifier(), vData._time );
00981                         if( imageCache.get() == NULL )
00982                         {
00983                                 BOOST_THROW_EXCEPTION( exception::Memory()
00984                                         << exception::dev() + "Clip " + quotes( clip.getFullName() ) + " not in memory cache (identifier:" + quotes( clip.getClipIdentifier() ) + ")." );
00985                         }
00986                         TUTTLE_TLOG( TUTTLE_INFO, "[Node Process] Declare future usages: " << clip.getClipIdentifier() << ", add reference: " << vData._outDegree );
00987                         if( vData._outDegree > 0 )
00988                         {
00989                                 imageCache->addReference( ofx::imageEffect::OfxhImage::eReferenceOwnerHost, vData._outDegree ); // add one reference for each future usage of this output
00990                         }
00991                 }
00992 //              else
00993 //              {
00994 //                      const INode::TimesSet& timesSet = timesSetMap[clip.getName()]; /// @todo tuttle: use edge._timesNeeded
00995 //                      if( timesSet.size() == 0 )
00996 //                              continue; // the plugin don't use this input (is it allowed by the standard?)
00997 //                      BOOST_FOREACH( const INode::TimesSet::value_type& inTime, timesSet )
00998 //                      {
00999 //                              //TUTTLE_TLOG_VAR2( TUTTLE_INFO, clip.getIdentifier(), clip.getFullName() );
01000 //                              memory::CACHE_ELEMENT imageCache = memoryCache.get( clip.getClipIdentifier(), inTime );
01001 //                              if( imageCache.get() == NULL )
01002 //                              {
01003 //                                      BOOST_THROW_EXCEPTION( exception::Memory()
01004 //                                              << exception::dev() + "Clip " + quotes( clip.getFullName() ) + " not in memory cache (identifier:" + quotes( clip.getClipIdentifier() ) + ")." );
01005 //                              }
01006 //                              imageCache->releaseReference();
01007 //                      }
01008 //              }
01009         }
01010 }
01011 
01012 
01013 void ImageEffectNode::postProcess( graph::ProcessVertexAtTimeData& vData )
01014 {
01015 //      TUTTLE_TLOG( TUTTLE_INFO, "postProcess: " << getName() );
01016 }
01017 
01018 
01019 void ImageEffectNode::endSequence( graph::ProcessVertexData& vData )
01020 {
01021 //      TUTTLE_TLOG( TUTTLE_INFO, "end: " << getName() );
01022         endSequenceRenderAction( vData._renderTimeRange.min,
01023                          vData._renderTimeRange.max,
01024                          vData._step,
01025                          vData._interactive,
01026                          vData._renderScale );
01027 }
01028 
01029 
01030 std::ostream& ImageEffectNode::print( std::ostream& os ) const
01031 {
01032         const ImageEffectNode& v = *this;
01033         os << "________________________________________________________________________________" << std::endl;
01034         os << "Plug-in:" << v.getLabel() << std::endl;
01035         os << "Description:" << v.getLongLabel() << std::endl;
01036         os << "Context:" << v._context << std::endl;
01037         os << "Clips:" << std::endl;
01038         for( ImageEffectNode::ClipImageMap::const_iterator it = v._clipImages.begin(), itEnd = v._clipImages.end();
01039              it != itEnd;
01040              ++it )
01041         {
01042                 os << "  * " << it->second->getName() << std::endl;
01043         }
01044         os << "Params:" << std::endl;
01045         for( ImageEffectNode::ParamVector::const_iterator it = v._paramVector.begin(), itEnd = v._paramVector.end();
01046              it != itEnd;
01047              ++it )
01048         {
01049                 os << "  * " << it->getName() << " (" << it->getLabel() << "): " << it->displayValues(os) << std::endl;
01050         }
01051         os << "________________________________________________________________________________" << std::endl;
01052         return os;
01053 }
01054 
01055 std::ostream& operator<<( std::ostream& os, const ImageEffectNode& v )
01056 {
01057         return v.print(os);
01058 }
01059 
01060 void ImageEffectNode::debugOutputImage( const OfxTime time ) const
01061 {
01062         #ifdef TUTTLE_DEBUG_OUTPUT_ALL_NODES
01063         IMemoryCache& memoryCache( core().getMemoryCache() );
01064 
01065         boost::shared_ptr<Image> image = memoryCache.get( this->getName() + "." kOfxOutputAttributeName, time );
01066 
01067         // big hack, for debug...
01068         image->debugSaveAsPng( "data/debug/" + boost::lexical_cast<std::string>( time ) + "_" + this->getName() + ".png" );
01069         #endif
01070 }
01071 
01072 }
01073 }