diff --git a/TAO/orbsvcs/orbsvcs/FaultTolerance/FT_Invocation_Endpoint_Selectors.cpp b/TAO/orbsvcs/orbsvcs/FaultTolerance/FT_Invocation_Endpoint_Selectors.cpp
index 638686d0048fb..d3d6dfe6ccd20 100644
--- a/TAO/orbsvcs/orbsvcs/FaultTolerance/FT_Invocation_Endpoint_Selectors.cpp
+++ b/TAO/orbsvcs/orbsvcs/FaultTolerance/FT_Invocation_Endpoint_Selectors.cpp
@@ -49,25 +49,32 @@ TAO_FT_Invocation_Endpoint_Selector::select_primary (
     TAO::Profile_Transport_Resolver *r,
     ACE_Time_Value *max_wait_time)
 {
-  // Set lock, as forward_profiles might be deleted concurrently.
-  ACE_MT (ACE_GUARD_RETURN (TAO_SYNCH_MUTEX,
-                            guard,
-                            const_cast<TAO_SYNCH_MUTEX &> (r->stub ()->profile_lock ()),
-                            false));
-
-  // Grab the forwarded list
-  TAO_MProfile *prof_list =
-    const_cast<TAO_MProfile *> (r->stub ()->forward_profiles ());
-
-  TAO_MProfile &basep = r->stub ()->base_profiles ();
+  TAO_MProfile *prof_list;
+  TAO_MProfile prof_list_aux;
 
-  if (prof_list ==0)
-    {
-      prof_list = &basep;
-      // No need to hold stub lock any more. We needed it only to use
-      // forward_profiles.
-      guard.release ();
-    }
+  // Retrieve the list of profiles to be used.
+  // Set lock, as forward_profiles might be deleted concurrently.
+  {
+    ACE_MT (ACE_GUARD_RETURN (TAO_SYNCH_MUTEX,
+                              guard,
+                              const_cast<TAO_SYNCH_MUTEX &> (r->stub ()->profile_lock ()),
+                              false));
+
+    // Grab the forwarded list
+    TAO_MProfile *forward_prof_list =
+      const_cast<TAO_MProfile *> (r->stub ()->forward_profiles ());
+
+    if (forward_prof_list == 0)
+      {
+        TAO_MProfile &basep = r->stub ()->base_profiles ();
+        prof_list = &basep;
+      }
+    else
+      {
+        prof_list_aux.set(*forward_prof_list);
+        prof_list = &prof_list_aux;
+      }
+  }
 
   if (prof_list == 0)
     return false;
@@ -107,26 +114,32 @@ TAO_FT_Invocation_Endpoint_Selector::select_secondary (
     TAO::Profile_Transport_Resolver *r,
     ACE_Time_Value *max_wait_time)
 {
-  // Set lock, as forward_profiles might be deleted concurrently.
-  ACE_MT (ACE_GUARD_RETURN (TAO_SYNCH_MUTEX,
-                            guard,
-                            const_cast<TAO_SYNCH_MUTEX &> (r->stub ()->profile_lock ()),
-                            false));
-
-  // Grab the forwarded list
-  TAO_MProfile *prof_list =
-    const_cast<TAO_MProfile *> (r->stub ()->forward_profiles ());
-
-  TAO_MProfile &basep =
-    r->stub ()->base_profiles ();
+  TAO_MProfile *prof_list;
+  TAO_MProfile prof_list_aux;
 
-  if (prof_list ==0)
-    {
-      prof_list = &basep;
-      // No need to hold stub lock any more. We needed it only to use
-      // forward_profiles.
-      guard.release ();
-    }
+  // Retrieve the list of profiles to be used.
+  // Set lock, as forward_profiles might be deleted concurrently.
+  {
+    ACE_MT (ACE_GUARD_RETURN (TAO_SYNCH_MUTEX,
+                              guard,
+                              const_cast<TAO_SYNCH_MUTEX &> (r->stub ()->profile_lock ()),
+                              false));
+
+    // Grab the forwarded list
+    TAO_MProfile *forward_prof_list =
+      const_cast<TAO_MProfile *> (r->stub ()->forward_profiles ());
+
+    if (forward_prof_list == 0)
+      {
+        TAO_MProfile &basep = r->stub ()->base_profiles ();
+        prof_list = &basep;
+      }
+    else
+      {
+        prof_list_aux.set(*forward_prof_list);
+        prof_list = &prof_list_aux;
+      }
+  }
 
   if (prof_list == 0)
     return false;
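
Illustration only, not part of the patch: a minimal standalone sketch of the pattern both hunks apply, namely copying the (possibly concurrently reset) forwarded profile list into a local object while the stub's profile lock is held, then working from either the stable base list or that private copy after the lock has been released. The Stub and ProfileList types and all member names below are hypothetical stand-ins for illustration, not TAO's API.

// Sketch only: hypothetical stand-ins for the stub / profile-list types,
// illustrating the scoped-lock-and-copy pattern used by the patch.
#include <mutex>
#include <string>
#include <vector>

struct ProfileList
{
  std::vector<std::string> profiles;
  void set (const ProfileList &other) { profiles = other.profiles; }
};

struct Stub
{
  std::mutex profile_lock;                  // guards forward_profiles
  ProfileList *forward_profiles = nullptr;  // may be reset concurrently
  ProfileList base_profiles;                // assumed stable for the call
};

bool
select_endpoint (Stub &stub)
{
  ProfileList *prof_list = nullptr;
  ProfileList prof_list_aux;        // local copy, outlives the lock

  {
    // Hold the lock only while touching forward_profiles.
    std::lock_guard<std::mutex> guard (stub.profile_lock);

    if (stub.forward_profiles == nullptr)
      prof_list = &stub.base_profiles;
    else
      {
        prof_list_aux.set (*stub.forward_profiles);  // copy under lock
        prof_list = &prof_list_aux;
      }
  }

  if (prof_list == nullptr)
    return false;

  // Safe to walk prof_list here: it is either the stable base list or our
  // private copy, so a concurrent reset of forward_profiles cannot affect it.
  return !prof_list->profiles.empty ();
}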