diff --git a/CHANGELOG.md b/CHANGELOG.md index 134ee952f9..fefd18b381 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ - Dropped support for parsing Tensorflow network format. Newest Marabou version that supports Tensorflow is at commit 190555573e4702. - Fixed bug in the parsing of `transpose` nodes in command line C++ parser. - Implemented forward-backward abstract interpretation, symbolic bound tightening, interval arithmetic and simulations for all activation functions. + - Implemented backward analysis using preimage-approximation algorithm for `Relu`, `LeakyRelu`, `Sign` and `Bilinear` Layers. - Added the BaBSR heuristic as a new branching strategy for ReLU Splitting - Support Sub of two variables, "Mul" of two constants, Slice, and ConstantOfShape in the python onnx parser - Renamed SmtCore module to SearchTreeHandler diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index c61e6d7848..3923dc09fc 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -67,6 +67,13 @@ const unsigned GlobalConfiguration::ROW_BOUND_TIGHTENER_SATURATION_ITERATIONS = const double GlobalConfiguration::COST_FUNCTION_ERROR_THRESHOLD = 0.0000000001; const unsigned GlobalConfiguration::SIMULATION_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS = 25000; +const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS = 25; +const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE = 0.025; +const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE = 0.25; +const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY = 0; const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h index 71f2fd3b2d..59d07b893f 100644 --- a/src/configuration/GlobalConfiguration.h +++ b/src/configuration/GlobalConfiguration.h @@ -151,6 +151,27 @@ class GlobalConfiguration // Random seed for generating simulation values. static const unsigned SIMULATION_RANDOM_SEED; + // Random seed for EstimateVolume procedure (PreimageApproximation). + static const unsigned VOLUME_ESTIMATION_RANDOM_SEED; + + // Number of iterations for EstimateVolume procedure (PreimageApproximation). + static const unsigned VOLUME_ESTIMATION_ITERATIONS; + + // Random seed for PreimageApproximation optimization. + static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED; + + // Maximum iterations for PreimageApproximation optimization. + static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; + + // Step size for PreimageApproximation optimization. + static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; + + // Learning rate for PreimageApproximation optimization. + static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; + + // Weight decay for PreimageApproximation optimization. + static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY; + // How often should projected steepest edge reset the reference space? 
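Note: the constants above only parameterise the search; the optimization routine itself (Layer::OptimalParameterisedSymbolicBoundTightening, used further down in LPFormulator) is not part of this hunk. The sketch below is illustrative only and shows, under that assumption, how a projected-gradient loop over relaxation coefficients in [0, 1] could consume these constants; the function name and the volume callback are hypothetical stand-ins for the real objective (estimated preimage volume).

#include <algorithm>
#include <functional>
#include <vector>

#include "GlobalConfiguration.h"

// Hypothetical sketch only: projected-gradient search over relaxation coefficients in [0, 1].
std::vector<double>
optimizeCoeffsSketch( const std::function<double( const std::vector<double> & )> &volume,
                      std::vector<double> coeffs )
{
    const unsigned maxIters =
        GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS;
    const double step = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE;
    const double lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE;
    const double decay = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY;

    for ( unsigned iter = 0; iter < maxIters; ++iter )
    {
        for ( unsigned i = 0; i < coeffs.size(); ++i )
        {
            // Finite-difference estimate of the objective's sensitivity to coeffs[i]
            std::vector<double> perturbed = coeffs;
            perturbed[i] = std::min( 1.0, coeffs[i] + step );
            double grad = ( volume( perturbed ) - volume( coeffs ) ) / step;

            // Ascent step with optional weight decay, projected back onto [0, 1]
            coeffs[i] += lr * grad - decay * coeffs[i];
            coeffs[i] = std::max( 0.0, std::min( 1.0, coeffs[i] ) );
        }
    }
    return coeffs;
}

The volume objective would typically be estimated by sampling, which is where VOLUME_ESTIMATION_ITERATIONS and the two random seeds come into play.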
static const unsigned PSE_ITERATIONS_BEFORE_RESET; @@ -271,7 +292,6 @@ class GlobalConfiguration */ static const bool MINIMIZE_PROOF_DEPENDENCIES; - #ifdef ENABLE_GUROBI /* The number of threads Gurobi spawns diff --git a/src/configuration/OptionParser.cpp b/src/configuration/OptionParser.cpp index 6864e9fc76..54bf7b496d 100644 --- a/src/configuration/OptionParser.cpp +++ b/src/configuration/OptionParser.cpp @@ -267,7 +267,8 @@ void OptionParser::initialize() &( ( *_stringOptions )[Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE] ) ) ->default_value( ( *_stringOptions )[Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE] ), "The MILP solver bound tightening type: " - "lp/backward-once/backward-converge/lp-inc/milp/milp-inc/iter-prop/none." ) + "lp/backward-once/backward-converge/backward-preimage-approx/lp-inc/milp/milp-inc/" + "iter-prop/none." ) #endif ; diff --git a/src/configuration/Options.cpp b/src/configuration/Options.cpp index 657ddb6b7c..0ddd143c13 100644 --- a/src/configuration/Options.cpp +++ b/src/configuration/Options.cpp @@ -209,6 +209,8 @@ MILPSolverBoundTighteningType Options::getMILPSolverBoundTighteningType() const return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE; if ( strategyString == "backward-converge" ) return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE; + if ( strategyString == "backward-preimage-approx" ) + return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX; else if ( strategyString == "milp" ) return MILPSolverBoundTighteningType::MILP_ENCODING; else if ( strategyString == "milp-inc" ) diff --git a/src/engine/Engine.cpp b/src/engine/Engine.cpp index edd534503c..423a27f723 100644 --- a/src/engine/Engine.cpp +++ b/src/engine/Engine.cpp @@ -1597,6 +1597,7 @@ void Engine::performMILPSolverBoundedTightening( Query *inputQuery ) case MILPSolverBoundTighteningType::LP_RELAXATION_INCREMENTAL: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: _networkLevelReasoner->lpRelaxationPropagation(); break; case MILPSolverBoundTighteningType::MILP_ENCODING: @@ -1661,6 +1662,15 @@ void Engine::performAdditionalBackwardAnalysisIfNeeded() printf( "Backward analysis tightened %u bounds\n", tightened ); } } + + if ( _milpSolverBoundTighteningType == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX ) + { + performMILPSolverBoundedTightening( &( *_preprocessedQuery ) ); + unsigned tightened = performSymbolicBoundTightening( &( *_preprocessedQuery ) ); + if ( _verbosity > 0 ) + printf( "Backward analysis tightened %u bounds\n", tightened ); + } } void Engine::performMILPSolverBoundedTighteningForSingleLayer( unsigned targetIndex ) @@ -1688,6 +1698,7 @@ void Engine::performMILPSolverBoundedTighteningForSingleLayer( unsigned targetIn return; case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: case MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION: case MILPSolverBoundTighteningType::NONE: return; @@ -3699,6 +3710,7 @@ bool Engine::certifyUNSATCertificate() return false; } } + _UNSATCertificateCurrentPointer->get()->deleteUnusedLemmas(); struct timespec certificationStart = TimeUtils::sampleMicro(); _precisionRestorer.restoreInitialEngineState( *this ); @@ -3889,4 +3901,4 @@ void Engine::incNumOfLemmas() const List 
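With the option-parser and Options changes above, the new mode is selected through the same MILP bound tightening string option as the existing modes. Assuming the command-line flag that backs MILP_SOLVER_BOUND_TIGHTENING_TYPE is --milp-tightening (the flag name itself is not shown in this diff), a run would look like:

    Marabou network.onnx property.txt --milp-tightening=backward-preimage-approx

When this value is chosen, performMILPSolverBoundedTightening runs the LP-relaxation propagation, and performAdditionalBackwardAnalysisIfNeeded re-runs it followed by a round of symbolic bound tightening, as shown in the Engine.cpp hunk above.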
*Engine::getPiecewiseLinearConstraints() const { return &_plConstraints; -} +} \ No newline at end of file diff --git a/src/engine/MILPSolverBoundTighteningType.h b/src/engine/MILPSolverBoundTighteningType.h index f317639375..517acf73e9 100644 --- a/src/engine/MILPSolverBoundTighteningType.h +++ b/src/engine/MILPSolverBoundTighteningType.h @@ -34,6 +34,9 @@ enum class MILPSolverBoundTighteningType { // Perform backward analysis BACKWARD_ANALYSIS_ONCE = 5, BACKWARD_ANALYSIS_CONVERGE = 6, + // Perform backward analysis using the PreimageApproximation Algorithm (arXiv:2305.03686v4 + // [cs.SE]) + BACKWARD_ANALYSIS_PREIMAGE_APPROX = 7, // Option to have no MILP bound tightening performed NONE = 10, }; diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp index c7619f4d1c..84447306f9 100644 --- a/src/nlr/LPFormulator.cpp +++ b/src/nlr/LPFormulator.cpp @@ -1,5 +1,5 @@ /********************* */ -/*! \file NetworkLevelReasoner.cpp +/*! \file LPFormulator.cpp ** \verbatim ** Top contributors (to current version): ** Guy Katz @@ -249,7 +249,8 @@ void LPFormulator::optimizeBoundsWithIncrementalLpRelaxation( const Map &layers, - bool backward ) + bool backward, + const Vector &coeffs ) { unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS ); @@ -302,7 +303,7 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map &solverToIndex ); // optimize every neuron of layer - optimizeBoundsOfNeuronsWithLpRlaxation( argument, backward ); + optimizeBoundsOfNeuronsWithLpRelaxation( argument, backward, coeffs ); LPFormulator_LOG( Stringf( "Tightening bound for layer %u - done", layerIndex ).ascii() ); } @@ -336,6 +337,13 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map throw InfeasibleQueryException(); } +void LPFormulator::optimizeBoundsWithPreimageApproximation( Map &layers ) +{ + const Vector &optimal_coeffs = layers[0]->OptimalParameterisedSymbolicBoundTightening(); + optimizeBoundsWithLpRelaxation( layers, false, optimal_coeffs ); + optimizeBoundsWithLpRelaxation( layers, true, optimal_coeffs ); +} + void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map &layers, unsigned targetIndex ) { @@ -384,7 +392,7 @@ void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map &coeffs ) { unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS ); @@ -522,9 +532,9 @@ void LPFormulator::optimizeBoundsOfNeuronsWithLpRlaxation( ThreadArgument &args, mtx.lock(); if ( backward ) - createLPRelaxationAfter( layers, *freeSolver, lastIndexOfRelaxation ); + createLPRelaxationAfter( layers, *freeSolver, lastIndexOfRelaxation, coeffs ); else - createLPRelaxation( layers, *freeSolver, lastIndexOfRelaxation ); + createLPRelaxation( layers, *freeSolver, lastIndexOfRelaxation, coeffs ); mtx.unlock(); // spawn a thread to tighten the bounds for the current variable @@ -553,7 +563,6 @@ void LPFormulator::optimizeBoundsOfNeuronsWithLpRlaxation( ThreadArgument &args, } } - void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument &argument ) { try @@ -647,20 +656,31 @@ void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument & void LPFormulator::createLPRelaxation( const Map &layers, GurobiWrapper &gurobi, - unsigned lastLayer ) + unsigned lastLayer, + const Vector &coeffs ) { for ( const auto &layer : layers ) { - if ( layer.second->getLayerIndex() > lastLayer ) + unsigned currentLayerIndex = layer.second->getLayerIndex(); + if ( currentLayerIndex > lastLayer ) continue; - addLayerToModel( gurobi, 
layer.second, false ); + if ( coeffs.empty() ) + addLayerToModel( gurobi, layer.second, false ); + else + { + Map> layerIndicesToParameters = + layer.second->getParametersForLayers( layers, coeffs ); + const Vector ¤tLayerCoeffs = layerIndicesToParameters[currentLayerIndex]; + addLayerToParameterisedModel( gurobi, layer.second, false, currentLayerCoeffs ); + } } } void LPFormulator::createLPRelaxationAfter( const Map &layers, GurobiWrapper &gurobi, - unsigned firstLayer ) + unsigned firstLayer, + const Vector &coeffs ) { unsigned depth = GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH; std::priority_queue, std::greater> layersToAdd; @@ -678,7 +698,17 @@ void LPFormulator::createLPRelaxationAfter( const Map &layers continue; else { - addLayerToModel( gurobi, currentLayer, true ); + if ( coeffs.empty() ) + addLayerToModel( gurobi, currentLayer, true ); + else + { + Map> layerIndicesToParameters = + currentLayer->getParametersForLayers( layers, coeffs ); + const Vector ¤tLayerCoeffs = + layerIndicesToParameters[currentLayerIndex]; + addLayerToParameterisedModel( gurobi, currentLayer, true, currentLayerCoeffs ); + } + for ( const auto &nextLayer : currentLayer->getSuccessorLayers() ) { if ( layerToDepth.exists( nextLayer ) ) @@ -846,7 +876,6 @@ void LPFormulator::addReluLayerToLpRelaxation( GurobiWrapper &gurobi, } } - void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -911,7 +940,6 @@ void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi, } } - void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -977,9 +1005,7 @@ void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, double lb = std::max( 0.0, layer->getLb( i ) ); gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - /* - The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max(-lb, ub). - */ + // The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max(-lb, ub). 
// y >= 0 List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); @@ -994,7 +1020,6 @@ void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, } } - void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1027,7 +1052,6 @@ void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, if ( createVariables && !gurobi.containsVariable( sourceName ) ) gurobi.addVariable( sourceName, sourceLb, sourceUb ); - double sourceUbSigmoid = SigmoidConstraint::sigmoid( sourceUb ); double sourceLbSigmoid = SigmoidConstraint::sigmoid( sourceLb ); @@ -1101,7 +1125,6 @@ void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, } } - void LPFormulator::addSignLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1229,7 +1252,6 @@ void LPFormulator::addMaxLayerToLpRelaxation( GurobiWrapper &gurobi, if ( createVariables && !gurobi.containsVariable( sourceName ) ) gurobi.addVariable( sourceName, sourceLb, sourceUb ); - // Target is at least source: target - source >= 0 terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); @@ -1267,7 +1289,6 @@ void LPFormulator::addMaxLayerToLpRelaxation( GurobiWrapper &gurobi, } } - void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1330,8 +1351,6 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, double bias; SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType(); - - List terms; if ( FloatUtils::areEqual( lb, ub ) ) { @@ -1464,20 +1483,21 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, List sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - Vector sourceLbs; Vector sourceUbs; Vector sourceValues; Vector sourceNeurons; + Vector sourceLayers; bool allConstant = true; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); + sourceLayers.append( sourceLayer ); sourceNeurons.append( sourceNeuron ); sourceLbs.append( sourceLb ); sourceUbs.append( sourceUb ); @@ -1526,10 +1546,10 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -sourceLbs[1], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + Stringf( "x%u", sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) ); terms.append( GurobiWrapper::Term( -sourceLbs[0], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addGeqConstraint( terms, -sourceLbs[0] * sourceLbs[1] ); // Upper bound: out <= u_y * x + l_x * y - l_x * u_y @@ -1537,16 +1557,15 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -sourceUbs[1], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + Stringf( "x%u", 
sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) ); terms.append( GurobiWrapper::Term( -sourceLbs[0], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addLeqConstraint( terms, -sourceLbs[0] * sourceUbs[1] ); } } } - void LPFormulator::addWeightedSumLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1668,7 +1687,7 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi, else { double width = sourceUb - sourceLb; - double coeff = ( sourceUb - slope * sourceLb ) / width; + double weight = ( sourceUb - slope * sourceLb ) / width; double bias = ( ( slope - 1 ) * sourceUb * sourceLb ) / width; /* @@ -1691,15 +1710,441 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi, terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); gurobi.addGeqConstraint( terms, 0 ); + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -weight, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, bias ); + } + } + } +} + +void LPFormulator::addLayerToParameterisedModel( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + const Vector &coeffs ) +{ + switch ( layer->getLayerType() ) + { + case Layer::RELU: + addReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeffs ); + break; + + case Layer::LEAKY_RELU: + addLeakyReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeffs ); + break; + + case Layer::SIGN: + addSignLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeffs ); + break; + + case Layer::BILINEAR: + addBilinearLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeffs ); + break; + + default: + addLayerToModel( gurobi, layer, createVariables ); + break; + } +} + +void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + const Vector &coeffs ) +{ + double coeff = coeffs[0]; + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = sourceValue > 0 ? 
sourceValue : 0; + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + gurobi.addVariable( Stringf( "x%u", targetVariable ), 0, layer->getUb( i ) ); + + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // The ReLU is active, y = x + if ( sourceLb < 0 ) + sourceLb = 0; + + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + // The ReLU is inactive, y = 0 + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else + { + /* + The phase of this ReLU is not yet fixed. + + For y = ReLU(x), we add the following relaxation: + + 1. y >= 0 + 2. y >= x + 2. y >= coeff * x + 3. y is below the line the crosses (x.lb,0) and (x.ub,x.ub) + */ + + // y >= 0 + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + // y >= x, i.e. y - x >= 0. + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + // y >= coeff * x, i.e. y - coeff * x >= 0 (varies continuously between y >= 0 and + // y >= alpha * x). terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -coeff, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + /* + u ul + y <= ----- x - ----- + u - l u - l + */ + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -sourceUb / ( sourceUb - sourceLb ), + Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, + ( -sourceUb * sourceLb ) / ( sourceUb - sourceLb ) ); + } + } + } +} + +void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + const Vector &coeffs ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( layer->neuronEliminated( i ) ) + continue; + + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = FloatUtils::isNegative( sourceValue ) ? 
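To summarise the unfixed-phase ReLU case above: for y = ReLU(x) with x in [l, u], l < 0 < u, and a single parameter α = coeffs[0] in [0, 1], addReluLayerToParameterisedLpRelaxation adds

    \[
        y \ge 0, \qquad y \ge x, \qquad y \ge \alpha\,x, \qquad
        y \le \frac{u}{u - l}\,x - \frac{u\,l}{u - l}
    \]

so the third constraint interpolates continuously between y >= 0 (α = 0) and y >= x (α = 1), while the upper bound remains the usual triangle relaxation through (l, 0) and (u, u).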
-1 : 1; + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // The Sign is positive, y = 1 + gurobi.addVariable( Stringf( "x%u", targetVariable ), 1, 1 ); + } + else if ( FloatUtils::isNegative( sourceUb ) ) + { + // The Sign is negative, y = -1 + gurobi.addVariable( Stringf( "x%u", targetVariable ), -1, -1 ); + } + else + { + /* + The phase of this Sign is not yet fixed. + + For y = Sign(x), we add the following parallelogram relaxation: + + 1. y >= -1 + 2. y <= -1 + 3. y is below the line the crosses (x.lb,-1) and (0,1) + 4. y is above the line the crosses (0,-1) and (x.ub,1) + */ + + // -1 <= y <= 1 + gurobi.addVariable( Stringf( "x%u", targetVariable ), -1, 1 ); + + /* + 2 + y <= ----- * coeff[0] * x + 1 + - l + Varies continuously between y <= 1 and y <= -2/l * x + 1. + */ + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( 2.0 / sourceLb * coeffs[0], + Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, 1 ); + + /* + 2 + y >= ----- * coeffs[1] * x - 1 + u + Varies continuously between y >= -1 and y >= 2/u * x - 1. + */ + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -2.0 / sourceUb * coeffs[1], + Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, -1 ); + } + } +} + +void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + const Vector &coeffs ) +{ + double slope = layer->getAlpha(); + double coeff = coeffs[0]; + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = sourceValue > 0 ? 
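In the same way, for an unfixed y = Sign(x) with x in [l, u], l < 0 < u, the parameterised parallelogram relaxation above uses α₀ = coeffs[0] and α₁ = coeffs[1] in [0, 1]:

    \[
        -1 \le y \le 1, \qquad
        y \le -\frac{2\,\alpha_0}{l}\,x + 1, \qquad
        y \ge \frac{2\,\alpha_1}{u}\,x - 1
    \]

α₀ moves the upper side continuously between y <= 1 and the parallelogram edge through (l, -1) and (0, 1); α₁ does the same for the lower side, between y >= -1 and the edge through (0, -1) and (u, 1).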
sourceValue : 0; + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + gurobi.addVariable( + Stringf( "x%u", targetVariable ), layer->getLb( i ), layer->getUb( i ) ); + + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // The LeakyReLU is active, y = x + + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + // The LeakyReLU is inactive, y = alpha * x + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -slope, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else + { + double width = sourceUb - sourceLb; + double weight = ( sourceUb - slope * sourceLb ) / width; + double bias = ( ( slope - 1 ) * sourceUb * sourceLb ) / width; + + /* + The phase of this LeakyReLU is not yet fixed. + For y = LeakyReLU(x), we add the following relaxation: + 1. y >= ((1 - alpha) * coeff + alpha) * x (varies continuously between y >= + alpha * x and y >= x). + 2. y >= x + 3. y >= alpha * x + 4. y is below the line the crosses (x.lb,0) and (x.ub,x.ub) + */ + + // y >= ((1 - alpha) * coeff + alpha) * x + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -slope - ( 1 - slope ) * coeff, + Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + // y >= x, i.e. y - x >= 0 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + // y >= alpha * x, i.e. 
y - alpha * x >= 0 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -slope, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -weight, Stringf( "x%u", sourceVariable ) ) ); gurobi.addLeqConstraint( terms, bias ); } } } } +void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + const Vector &coeffs ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + + Vector sourceLbs; + Vector sourceUbs; + Vector sourceValues; + Vector sourceNeurons; + Vector sourceLayers; + bool allConstant = true; + for ( const auto &sourceIndex : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); + + sourceLayers.append( sourceLayer ); + sourceNeurons.append( sourceNeuron ); + sourceLbs.append( sourceLb ); + sourceUbs.append( sourceUb ); + + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) + { + allConstant = false; + } + else + { + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + sourceValues.append( sourceValue ); + } + } + + if ( allConstant ) + { + // If the both source neurons have been eliminated, this neuron is constant + double targetValue = sourceValues[0] * sourceValues[1]; + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + continue; + } + + double lb = FloatUtils::infinity(); + double ub = FloatUtils::negativeInfinity(); + List values = { sourceLbs[0] * sourceLbs[1], + sourceLbs[0] * sourceUbs[1], + sourceUbs[0] * sourceLbs[1], + sourceUbs[0] * sourceUbs[1] }; + for ( const auto &v : values ) + { + if ( v < lb ) + lb = v; + if ( v > ub ) + ub = v; + } + + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); + + // Billinear linear relaxation (arXiv:2405.21063v2 [cs.LG]) + // Lower bound: out >= a_l * x + b_l * y + c_l, where + // a_l = alpha1 * l_y + ( 1 - alpha1 ) * u_y + // b_l = alpha1 * l_x + ( 1 - alpha1 ) * u_x + // c_l = -alpha1 * l_x * l_y - ( 1 - alpha1 ) * u_x * u_y + + // Upper bound: out <= a_u * x + b_u * y + c_u, where + // a_u = alpha2 * u_y + ( 1 - alpha2 ) * l_y + // b_u = alpha2 * l_x + ( 1 - alpha2 ) * u_x + // c_u = -alpha2 * l_x * u_y - ( 1 - alpha2 ) * u_x * l_y + + List terms; + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( + -coeffs[0] * sourceLbs[1] - ( 1 - coeffs[0] ) * sourceUbs[1], + Stringf( "x%u", sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) ); + terms.append( GurobiWrapper::Term( + -coeffs[0] * sourceLbs[0] - ( 1 - coeffs[0] ) * sourceUbs[0], + Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) ); + gurobi.addGeqConstraint( terms, + -coeffs[0] * sourceLbs[0] * sourceLbs[1] - + ( 1 - coeffs[0] ) * 
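The parameterised LeakyReLU relaxation above follows the same pattern. Writing a for the layer slope (layer->getAlpha()) and c = coeffs[0] in [0, 1], the unfixed-phase constraints are, for x in [l, u] with l < 0 < u:

    \[
        y \ge \bigl( (1 - a)\,c + a \bigr)\,x, \qquad
        y \ge x, \qquad
        y \ge a\,x, \qquad
        y \le \frac{u - a\,l}{u - l}\,x + \frac{(a - 1)\,u\,l}{u - l}
    \]

The first constraint varies continuously between y >= a·x (c = 0) and y >= x (c = 1); the last is the upper line through (l, a·l) and (u, u), matching the weight and bias computed above.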
sourceUbs[0] * sourceUbs[1] ); + + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( + -coeffs[1] * sourceUbs[1] - ( 1 - coeffs[1] ) * sourceLbs[1], + Stringf( "x%u", sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) ); + terms.append( GurobiWrapper::Term( + -coeffs[1] * sourceLbs[0] - ( 1 - coeffs[1] ) * sourceUbs[0], + Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) ); + gurobi.addLeqConstraint( terms, + -coeffs[1] * sourceLbs[0] * sourceUbs[1] - + ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1] ); + } + } +} + void LPFormulator::setCutoff( double cutoff ) { _cutoffInUse = true; diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index cee0abbb65..ac9ecc4557 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -52,7 +52,9 @@ class LPFormulator : public ParallelSolver constructed from scratch */ void optimizeBoundsWithLpRelaxation( const Map &layers, - bool backward = false ); + bool backward = false, + const Vector &coeffs = Vector( {} ) ); + void optimizeBoundsWithPreimageApproximation( Map &layers ); void optimizeBoundsOfOneLayerWithLpRelaxation( const Map &layers, unsigned targetIndex ); void optimizeBoundsWithIncrementalLpRelaxation( const Map &layers ); @@ -72,10 +74,12 @@ class LPFormulator : public ParallelSolver */ void createLPRelaxation( const Map &layers, GurobiWrapper &gurobi, - unsigned lastLayer = UINT_MAX ); + unsigned lastLayer = UINT_MAX, + const Vector &coeffs = Vector( {} ) ); void createLPRelaxationAfter( const Map &layers, GurobiWrapper &gurobi, - unsigned firstLayer ); + unsigned firstLayer, + const Vector &coeffs = Vector( {} ) ); double solveLPRelaxation( GurobiWrapper &gurobi, const Map &layers, MinOrMax minOrMax, @@ -127,7 +131,38 @@ class LPFormulator : public ParallelSolver const Layer *layer, bool createVariables ); - void optimizeBoundsOfNeuronsWithLpRlaxation( ThreadArgument &args, bool backward ); + void + optimizeBoundsOfNeuronsWithLpRelaxation( ThreadArgument &args, + bool backward, + const Vector &coeffs = Vector( {} ) ); + + + // Create LP relaxations depending on external parameters. + void addLayerToParameterisedModel( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + const Vector &coeffs ); + + void addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + const Vector &coeffs ); + + void addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + const Vector &coeffs ); + + void addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + const Vector &coeffs ); + + void addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + const Vector &coeffs ); + /* Optimize for the min/max value of variableName with respect to the constraints diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index 08e6900538..d24a952225 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -75,7 +75,9 @@ void Layer::allocateMemory() _inputLayerSize = ( _type == INPUT ) ? 
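For the parameterised bilinear relaxation (out = x·y with x in [l_x, u_x], y in [l_y, u_y]), the two planes added above interpolate between the McCormick envelopes using α₁ = coeffs[0] and α₂ = coeffs[1] in [0, 1]:

    \[
    \begin{aligned}
        \text{out} &\ge a_l\,x + b_l\,y + c_l, &
            a_l &= \alpha_1 l_y + (1 - \alpha_1) u_y, &
            b_l &= \alpha_1 l_x + (1 - \alpha_1) u_x, &
            c_l &= -\alpha_1 l_x l_y - (1 - \alpha_1) u_x u_y, \\
        \text{out} &\le a_u\,x + b_u\,y + c_u, &
            a_u &= \alpha_2 u_y + (1 - \alpha_2) l_y, &
            b_u &= \alpha_2 l_x + (1 - \alpha_2) u_x, &
            c_u &= -\alpha_2 l_x u_y - (1 - \alpha_2) u_x l_y.
    \end{aligned}
    \]

At α₁ = 1 the lower plane reduces to the standard McCormick bound out >= l_y·x + l_x·y - l_x·l_y used by the non-parameterised relaxation earlier in this file, and α₂ = 1 likewise recovers the standard upper plane.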
_size : _layerOwner->getLayer( 0 )->getSize(); if ( Options::get()->getSymbolicBoundTighteningType() == - SymbolicBoundTighteningType::SYMBOLIC_BOUND_TIGHTENING ) + SymbolicBoundTighteningType::SYMBOLIC_BOUND_TIGHTENING || + Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX ) { _symbolicLb = new double[_size * _inputLayerSize]; _symbolicUb = new double[_size * _inputLayerSize]; @@ -1061,7 +1063,6 @@ void Layer::computeIntervalArithmeticBoundsForSigmoid() } } - void Layer::computeIntervalArithmeticBoundsForRound() { for ( unsigned i = 0; i < _size; ++i ) @@ -1094,7 +1095,6 @@ void Layer::computeIntervalArithmeticBoundsForRound() } } - void Layer::computeIntervalArithmeticBoundsForMax() { for ( unsigned i = 0; i < _size; ++i ) @@ -1104,7 +1104,6 @@ void Layer::computeIntervalArithmeticBoundsForMax() ASSERT( _neuronToActivationSources.exists( i ) ); List sources = getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); NeuronIndex indexOfMaxLowerBound = *( sources.begin() ); double maxLowerBound = FloatUtils::negativeInfinity(); @@ -1115,6 +1114,7 @@ void Layer::computeIntervalArithmeticBoundsForMax() unsigned counter = 0; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); @@ -1196,14 +1196,12 @@ void Layer::computeIntervalArithmeticBoundsForSoftmax() ASSERT( _neuronToActivationSources.exists( i ) ); List sources = getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - - ASSERT( sourceLayer->getSize() == _size ); Vector sourceLbs; Vector sourceUbs; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); @@ -1253,14 +1251,13 @@ void Layer::computeIntervalArithmeticBoundsForBilinear() List sources = getActivationSources( i ); ASSERT( sources.size() == 2 ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - Vector sourceLbs; Vector sourceUbs; Vector sourceValues; bool allConstant = true; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); @@ -1726,7 +1723,6 @@ void Layer::computeSymbolicBoundsForSign() for ( unsigned j = 0; j < _inputLayerSize; ++j ) _symbolicUb[j * _size + i] *= factor; - // Do the same for the bias, and then adjust _symbolicUpperBias[i] *= factor; _symbolicUpperBias[i] += 1; @@ -2047,17 +2043,17 @@ void Layer::computeSymbolicBoundsForLeakyRelu() // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) // Concrete upper bound: x_f <= ub_b double width = sourceUb - sourceLb; - double coeff = ( sourceUb - _alpha * sourceLb ) / width; + double weight = ( sourceUb - _alpha * sourceLb ) / width; if ( _alpha <= 1 ) { for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - _symbolicUb[j * _size + i] *= coeff; + _symbolicUb[j * _size + i] *= weight; } // Do the same for the bias, and then adjust - _symbolicUpperBias[i] *= coeff; + 
_symbolicUpperBias[i] *= weight; _symbolicUpperBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; @@ -2092,11 +2088,11 @@ void Layer::computeSymbolicBoundsForLeakyRelu() { for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - _symbolicLb[j * _size + i] *= coeff; + _symbolicLb[j * _size + i] *= weight; } // Do the same for the bias, and then adjust - _symbolicLowerBias[i] *= coeff; + _symbolicLowerBias[i] *= weight; _symbolicLowerBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; if ( sourceUb > sourceLb ) @@ -2209,7 +2205,6 @@ void Layer::computeSymbolicBoundsForLeakyRelu() } } - void Layer::computeSymbolicBoundsForSigmoid() { std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); @@ -2269,7 +2264,7 @@ void Layer::computeSymbolicBoundsForSigmoid() double sourceLbSigmoid = SigmoidConstraint::sigmoid( sourceLb ); // Case when the Sigmoid constraint is fixed - if ( FloatUtils::areEqual( FloatUtils::round( sourceUb ), FloatUtils::round( sourceLb ) ) ) + if ( FloatUtils::areEqual( sourceLb, sourceUb ) ) { for ( unsigned j = 0; j < _inputLayerSize; ++j ) { @@ -2548,11 +2543,6 @@ void Layer::computeSymbolicBoundsForMax() ASSERT( _neuronToActivationSources.exists( i ) ); List sources = getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - - unsigned sourceLayerSize = sourceLayer->getSize(); - const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); - const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); NeuronIndex indexOfMaxLowerBound = *( sources.begin() ); double maxLowerBound = FloatUtils::negativeInfinity(); @@ -2562,6 +2552,7 @@ void Layer::computeSymbolicBoundsForMax() Map sourceUbs; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); @@ -2593,6 +2584,11 @@ void Layer::computeSymbolicBoundsForMax() } } + const Layer *sourceLayer = _layerOwner->getLayer( indexOfMaxLowerBound._layer ); + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); + if ( phaseFixed ) { // Phase fixed @@ -2672,7 +2668,6 @@ void Layer::computeSymbolicBoundsForSoftmax() std::fill_n( _work, _size * _size, 0 ); Set handledInputNeurons; - unsigned sourceLayerSize = _size; SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType(); for ( unsigned i = 0; i < _size; ++i ) @@ -2696,19 +2691,15 @@ void Layer::computeSymbolicBoundsForSoftmax() ASSERT( _neuronToActivationSources.exists( i ) ); List sources = getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - - sourceLayerSize = sourceLayer->getSize(); - ASSERT( sourceLayerSize == _size ); Vector sourceLbs; Vector sourceUbs; Vector sourceMids; Vector targetLbs; Vector targetUbs; - unsigned len = 0; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); @@ -2718,8 +2709,6 @@ void Layer::computeSymbolicBoundsForSoftmax() sourceMids.append( ( sourceLb + sourceUb ) / 2 ); targetLbs.append( _lb[i] ); targetUbs.append( _ub[i] ); - - ++len; } // Find the index of i in the softmax @@ 
-2758,8 +2747,8 @@ void Layer::computeSymbolicBoundsForSoftmax() _symbolicUpperBias[i] = _ub[i]; for ( const auto &sourceIndex : sources ) { - symbolicLb[len * sourceIndex._neuron + i] = 0; - symbolicUb[len * sourceIndex._neuron + i] = 0; + symbolicLb[_size * sourceIndex._neuron + i] = 0; + symbolicUb[_size * sourceIndex._neuron + i] = 0; } } else @@ -2782,7 +2771,7 @@ void Layer::computeSymbolicBoundsForSoftmax() { double dldj = softmaxdLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - symbolicLb[len * sourceIndex._neuron + i] = dldj; + symbolicLb[_size * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } @@ -2795,7 +2784,7 @@ void Layer::computeSymbolicBoundsForSoftmax() { double dldj = softmaxdLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - symbolicLb[len * sourceIndex._neuron + i] = dldj; + symbolicLb[_size * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } @@ -2808,7 +2797,7 @@ void Layer::computeSymbolicBoundsForSoftmax() { double dudj = softmaxdLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); - symbolicUb[len * sourceIndex._neuron + i] = dudj; + symbolicUb[_size * sourceIndex._neuron + i] = dudj; _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; ++inputIndex; } @@ -2822,7 +2811,7 @@ void Layer::computeSymbolicBoundsForSoftmax() { double dldj = softmaxdERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - symbolicLb[len * sourceIndex._neuron + i] = dldj; + symbolicLb[_size * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } @@ -2834,7 +2823,7 @@ void Layer::computeSymbolicBoundsForSoftmax() { double dudj = softmaxdERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); - symbolicUb[len * sourceIndex._neuron + i] = dudj; + symbolicUb[_size * sourceIndex._neuron + i] = dudj; _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; ++inputIndex; } @@ -2845,6 +2834,7 @@ void Layer::computeSymbolicBoundsForSoftmax() for ( const auto &sourceLayerEntry : _sourceLayers ) { const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerEntry.first ); + unsigned sourceLayerSize = sourceLayer->getSize(); /* Perform the multiplication @@ -3036,27 +3026,26 @@ void Layer::computeSymbolicBoundsForBilinear() List sources = getActivationSources( i ); ASSERT( sources.size() == 2 ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - - unsigned sourceLayerSize = sourceLayer->getSize(); - const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); - const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); - Vector sourceLbs; Vector sourceUbs; Vector sourceValues; + Vector sourceNeurons; + Vector sourceLayerSizes; + Vector sourceLayers; bool allConstant = true; - unsigned indexA = 0; - unsigned indexB = 0; - unsigned counter = 0; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); + unsigned sourceLayerSize = sourceLayer->getSize(); + sourceLayers.append( sourceLayer ); + sourceNeurons.append( sourceNeuron ); sourceLbs.append( sourceLb ); sourceUbs.append( sourceUb ); + sourceLayerSizes.append( sourceLayerSize ); if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) { @@ -3067,16 +3056,6 @@ 
void Layer::computeSymbolicBoundsForBilinear() double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); sourceValues.append( sourceValue ); } - - if ( counter == 0 ) - { - indexA = sourceIndex._neuron; - } - else - { - indexB = sourceIndex._neuron; - } - ++counter; } if ( allConstant ) @@ -3106,47 +3085,75 @@ void Layer::computeSymbolicBoundsForBilinear() // Symbolic upper bound: // out <= alpha * x + beta * y + gamma // where alpha = ub_y, beta = lb_x, gamma = -lb_x * ub_y + double aLower = sourceLbs[1]; + double aUpper = sourceUbs[1]; + double bLower = sourceLbs[0]; + double bUpper = sourceLbs[0]; + _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1]; + _symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1]; + for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - if ( sourceLbs[1] >= 0 ) + if ( aLower >= 0 ) { _symbolicLb[j * _size + i] += - sourceLbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; + aLower * ( sourceLayers[0] + ->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicLowerBias[i] += aLower * ( sourceLayers[0]->getSymbolicLowerBias() )[0]; } else { _symbolicLb[j * _size + i] += - sourceLbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA]; + aLower * ( sourceLayers[0] + ->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicLowerBias[i] += aLower * ( sourceLayers[0]->getSymbolicUpperBias() )[0]; } - if ( sourceUbs[1] >= 0 ) + if ( aUpper >= 0 ) { _symbolicUb[j * _size + i] += - sourceUbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA]; + aUpper * ( sourceLayers[0] + ->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicUpperBias[i] += aUpper * ( sourceLayers[0]->getSymbolicUpperBias() )[0]; } else { - _symbolicLb[j * _size + i] += - sourceUbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; + _symbolicUb[j * _size + i] += + aUpper * ( sourceLayers[0] + ->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicUpperBias[i] += aUpper * ( sourceLayers[0]->getSymbolicLowerBias() )[0]; } - if ( sourceLbs[0] >= 0 ) + if ( bLower >= 0 ) { _symbolicLb[j * _size + i] += - sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB]; - _symbolicUb[j * _size + i] += - sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB]; + bLower * ( sourceLayers[1] + ->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bLower * ( sourceLayers[1]->getSymbolicLowerBias() )[1]; } else { _symbolicLb[j * _size + i] += - sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB]; + bLower * ( sourceLayers[1] + ->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bLower * ( sourceLayers[1]->getSymbolicUpperBias() )[1]; + } + + if ( bUpper >= 0 ) + { + _symbolicUb[j * _size + i] += + bUpper * ( sourceLayers[1] + ->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bUpper * ( sourceLayers[1]->getSymbolicUpperBias() )[1]; + } + else + { _symbolicUb[j * _size + i] += - sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB]; + bUpper * ( sourceLayers[1] + ->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bUpper * ( sourceLayers[1]->getSymbolicLowerBias() )[1]; } } - _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1]; - _symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1]; double lb = FloatUtils::infinity(); double ub = FloatUtils::negativeInfinity(); @@ -3327,7 +3334,6 @@ void Layer::computeSymbolicBoundsForWeightedSum() 
} } - /* We now have the symbolic representation for the current layer. Next, we compute new lower and upper bounds for @@ -3397,172 +3403,1294 @@ void Layer::computeSymbolicBoundsForWeightedSum() } } -double Layer::softmaxLSELowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) +void Layer::computeParameterisedSymbolicBounds( const Vector &coeffs, bool receive ) { - double sum = 0; - for ( unsigned j = 0; j < inputs.size(); ++j ) + switch ( _type ) { - double lj = inputLbs[j]; - double uj = inputUbs[j]; - double xj = inputs[j]; - sum += - ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); - } - - return std::exp( inputs[i] ) / sum; -} + case RELU: + computeParameterisedSymbolicBoundsForRelu( coeffs, receive ); + break; -double Layer::softmaxdLSELowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) -{ - double val = 0; - if ( i == di ) - val += softmaxLSELowerBound( inputMids, inputLbs, inputUbs, i ); + case SIGN: + computeParameterisedSymbolicBoundsForSign( coeffs, receive ); + break; - double ldi = inputLbs[di]; - double udi = inputUbs[di]; + case LEAKY_RELU: + computeParameterisedSymbolicBoundsForLeakyRelu( coeffs, receive ); + break; - double sum = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - double lj = inputLbs[j]; - double uj = inputUbs[j]; - double xj = inputMids[j]; + case BILINEAR: + computeParameterisedSymbolicBoundsForBilinear( coeffs, receive ); + break; - sum += - ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); + default: + computeSymbolicBounds(); + break; } - - val -= std::exp( inputMids[i] ) / ( sum * sum ) * ( std::exp( udi ) - std::exp( ldi ) ) / - ( udi - ldi ); - - return val; } -double Layer::softmaxLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) +void Layer::computeParameterisedSymbolicBoundsForRelu( const Vector &coeffs, bool receive ) { - double max = FloatUtils::negativeInfinity(); - unsigned maxInputIndex = 0; - unsigned index = 0; - for ( const auto &mid : inputMids ) + ASSERT( coeffs.size() == 1 ); + + double coeff = coeffs[0]; + ASSERT( coeff >= 0 && coeff <= 1 ); + + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + for ( unsigned i = 0; i < _size; ++i ) { - if ( mid > max ) + if ( _eliminatedNeurons.exists( i ) ) { - max = mid; - maxInputIndex = index; + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; } - ++index; } - if ( maxInputIndex == i ) - return softmaxERLowerBound( inputMids, inputLbs, inputUbs, i ); - else + for ( unsigned i = 0; i < _size; ++i ) { - double sum = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - if ( j == maxInputIndex ) - sum += 1; - else - { - double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; - double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; - double xjjstar = inputMids[j] - inputMids[maxInputIndex]; + if ( _eliminatedNeurons.exists( i ) ) + continue; - sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + - ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); - } - } + /* + There are two ways we can determine 
that a ReLU has become fixed: - return std::exp( inputMids[i] - inputMids[maxInputIndex] ) / sum; - } -} + 1. If the ReLU's variable has been externally fixed + 2. lbLb >= 0 (ACTIVE) or ubUb <= 0 (INACTIVE) + */ + PhaseStatus reluPhase = PHASE_NOT_FIXED; -double Layer::softmaxdLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) -{ - double max = FloatUtils::negativeInfinity(); - unsigned maxInputIndex = 0; - unsigned index = 0; - for ( const auto &mid : inputMids ) - { - if ( mid > max ) - { - max = mid; - maxInputIndex = index; - } - ++index; - } + // Has the f variable been eliminated or fixed? + if ( FloatUtils::isPositive( _lb[i] ) ) + reluPhase = RELU_PHASE_ACTIVE; + else if ( FloatUtils::isZero( _ub[i] ) ) + reluPhase = RELU_PHASE_INACTIVE; - if ( maxInputIndex == i ) - return softmaxdERLowerBound( inputMids, inputLbs, inputUbs, i, di ); - else - { - double val = softmaxLSELowerBound2( inputMids, inputLbs, inputUbs, i ); + ASSERT( _neuronToActivationSources.exists( i ) ); + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - double sum = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - if ( j == maxInputIndex ) - sum += 1; - else - { - double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; - double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; - double xjjstar = inputMids[j] - inputMids[maxInputIndex]; - sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + - ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); - } - } - double val2 = std::exp( inputMids[i] - inputMids[maxInputIndex] ) / ( sum * sum ); + /* + A ReLU initially "inherits" the symbolic bounds computed + for its input variable + */ + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); - if ( i == di ) + for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - double ldijstar = inputLbs[i] - inputUbs[maxInputIndex]; - double udijstar = inputUbs[i] - inputLbs[maxInputIndex]; - return val - - val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / ( udijstar - ldijstar ); + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; } - else if ( maxInputIndex == di ) + _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; + _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; + + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + + _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); + _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); + _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); + _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); + + // Has the b variable been fixed? 
+ if ( !FloatUtils::isNegative( sourceLb ) ) { - double sum2 = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - if ( j == maxInputIndex ) - continue; - else - { - double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; - double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; - sum2 += ( std::exp( ujjstar ) - std::exp( ljjstar ) ) / ( ujjstar - ljjstar ); - } - } - return -val + val2 * sum2; + reluPhase = RELU_PHASE_ACTIVE; } - else + else if ( !FloatUtils::isPositive( sourceUb ) ) { - double ldijstar = inputLbs[di] - inputUbs[maxInputIndex]; - double udijstar = inputUbs[di] - inputLbs[maxInputIndex]; - return -val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / - ( udijstar - ldijstar ); + reluPhase = RELU_PHASE_INACTIVE; } - } -} -double Layer::softmaxLSEUpperBound( const Vector &inputs, - const Vector &outputLb, + if ( reluPhase == PHASE_NOT_FIXED ) + { + // If we got here, we know that lbLb < 0 and ubUb + // > 0 There are four possible cases, depending on + // whether ubLb and lbUb are negative or positive + // (see Neurify paper, page 14). + + // Upper bound + if ( _symbolicLbOfUb[i] <= 0 ) + { + // lbOfUb[i] < 0 < ubOfUb[i] + // Concretize the upper bound using the Ehler's-like approximation + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicUb[j * _size + i] = _symbolicUb[j * _size + i] * _symbolicUbOfUb[i] / + ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] ); + + // Do the same for the bias, and then adjust + _symbolicUpperBias[i] = _symbolicUpperBias[i] * _symbolicUbOfUb[i] / + ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] ); + _symbolicUpperBias[i] -= _symbolicLbOfUb[i] * _symbolicUbOfUb[i] / + ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] ); + } + + // Lower bound: y >= coeff * x (varies continuously between y >= 0 and y >= alpha * x). + if ( _symbolicUbOfLb[i] <= 0 ) + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicLb[j * _size + i] *= coeff; + + _symbolicLowerBias[i] *= coeff; + } + else + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicLb[j * _size + i] = _symbolicLb[j * _size + i] * _symbolicUbOfLb[i] / + ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] ); + + _symbolicLowerBias[i] = _symbolicLowerBias[i] * _symbolicUbOfLb[i] / + ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] ); + } + + _symbolicLbOfLb[i] = 0; + } + else + { + // The phase of this ReLU is fixed! + if ( reluPhase == RELU_PHASE_ACTIVE ) + { + // Active ReLU, bounds are propagated as is + } + else + { + // Inactive ReLU, returns zero + _symbolicLbOfLb[i] = 0; + _symbolicUbOfLb[i] = 0; + _symbolicLbOfUb[i] = 0; + _symbolicUbOfUb[i] = 0; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] = 0; + _symbolicLb[j * _size + i] = 0; + } + + _symbolicLowerBias[i] = 0; + _symbolicUpperBias[i] = 0; + } + } + + if ( _symbolicLbOfUb[i] < 0 ) + _symbolicLbOfUb[i] = 0; + + /* + We now have the tightest bounds we can for the relu + variable. If they are tigheter than what was previously + known, store them. 
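In symbolic form, computeParameterisedSymbolicBoundsForRelu mirrors the parameterised LP relaxation. For an unfixed ReLU, the inherited symbolic upper bound U(x) is concretised with the Ehlers-style factor, and the inherited lower bound L(x) is either scaled by c = coeffs[0] (when its concrete upper bound is non-positive) or by the usual Neurify factor otherwise. Roughly, with u_U, l_U the concrete bounds of U and u_L, l_L the concrete bounds of L:

    \[
        U'(x) = \frac{u_U}{u_U - l_U}\,U(x) - \frac{l_U\,u_U}{u_U - l_U},
        \qquad
        L'(x) =
        \begin{cases}
            c\,L(x) & \text{if } u_L \le 0,\\[2pt]
            \dfrac{u_L}{u_L - l_L}\,L(x) & \text{otherwise,}
        \end{cases}
    \]

so c = 0 recovers the y >= 0 lower bound and c = 1 keeps the inherited bound, just as in the LP relaxation.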
+ */ + if ( receive ) + { + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } + } +} + +void Layer::computeParameterisedSymbolicBoundsForSign( const Vector &coeffs, bool receive ) +{ + ASSERT( coeffs.size() == 2 ); + ASSERT( coeffs[0] >= 0 && coeffs[0] <= 1 ); + ASSERT( coeffs[1] >= 0 && coeffs[1] <= 1 ); + + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + for ( unsigned i = 0; i < _size; ++i ) + { + // Eliminate neurons are skipped + if ( _eliminatedNeurons.exists( i ) ) + { + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; + + continue; + } + + /* + There are two ways we can determine that a Sign has become fixed: + + 1. If the Sign's variable has been externally fixed + 2. lbLb >= 0 (Positive) or ubUb < 0 (Negative) + */ + PhaseStatus signPhase = PHASE_NOT_FIXED; + + // Has the f variable been eliminated or fixed? + if ( !FloatUtils::isNegative( _lb[i] ) ) + signPhase = SIGN_PHASE_POSITIVE; + else if ( FloatUtils::isNegative( _ub[i] ) ) + signPhase = SIGN_PHASE_NEGATIVE; + + ASSERT( _neuronToActivationSources.exists( i ) ); + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + /* + A Sign initially "inherits" the symbolic bounds computed + for its input variable + */ + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; + } + _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; + _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; + + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + + _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); + _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); + _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); + _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); + + // Has the b variable been fixed? 
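The unfixed-phase branch of `computeParameterisedSymbolicBoundsForSign` below replaces the constant bounds -1 <= y <= 1 with a tunable parallelogram. As a sketch, with l < 0 <= u the source neuron's concrete bounds (the code substitutes its current `_symbolicLbOfLb` / `_symbolicUbOfUb` estimates for l and u) and c0, c1 short for `coeffs[0]`, `coeffs[1]`:

```latex
% Parameterised Sign relaxation for b in [l, u], l < 0 <= u, c0, c1 in [0, 1]:
\[
  \mathrm{sign}(b) \;\le\; -\frac{2 c_0}{l}\, b + 1,
  \qquad
  \mathrm{sign}(b) \;\ge\; \frac{2 c_1}{u}\, b - 1 .
\]
% c0 = c1 = 0 gives back the constant bounds; c0 = 1 gives the chord through
% (l, -1) and (0, 1), and c1 = 1 the chord through (0, -1) and (u, 1).
```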
+ if ( !FloatUtils::isNegative( sourceLb ) ) + { + signPhase = SIGN_PHASE_POSITIVE; + } + else if ( FloatUtils::isNegative( sourceUb ) ) + { + signPhase = SIGN_PHASE_NEGATIVE; + } + + if ( signPhase == PHASE_NOT_FIXED ) + { + PhaseStatus upperSignPhase = PHASE_NOT_FIXED; + PhaseStatus lowerSignPhase = PHASE_NOT_FIXED; + + // If we got here, we know that lbLb < 0 and ubUb + // > 0 + + // Upper bound + if ( !FloatUtils::isNegative( _symbolicLbOfUb[i] ) ) + { + // The upper bound is strictly positive - turns into + // the constant 1 + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicUb[j * _size + i] = 0; + + _symbolicUpperBias[i] = 1; + + upperSignPhase = SIGN_PHASE_POSITIVE; + } + else + { + // The upper bound's phase is not fixed, use parameterised + // parallelogram approximation: y <= - 2 / l * coeffs[0] * x + 1 + // (varies continuously between y <= 1 and y <= -2 / l * x + 1). + double factor = -2.0 / _symbolicLbOfLb[i] * coeffs[0]; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicUb[j * _size + i] *= factor; + + // Do the same for the bias, and then adjust + _symbolicUpperBias[i] *= factor; + _symbolicUpperBias[i] += 1; + } + + // Lower bound + if ( FloatUtils::isNegative( _symbolicUbOfLb[i] ) ) + { + // The lower bound is strictly negative - turns into + // the constant -1 + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicLb[j * _size + i] = 0; + + _symbolicLowerBias[i] = -1; + + lowerSignPhase = SIGN_PHASE_NEGATIVE; + } + else + { + // The lower bound's phase is not fixed, use parameterised + // parallelogram approximation: y >= 2 / u * coeffs[1] * x - 1 + // (varies continuously between y >= -1 and y >= 2 / u * x - 1). + double factor = 2.0 / _symbolicUbOfUb[i] * coeffs[1]; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] *= factor; + } + + // Do the same for the bias, and then adjust + _symbolicLowerBias[i] *= factor; + _symbolicLowerBias[i] -= 1; + } + + if ( upperSignPhase == PHASE_NOT_FIXED ) + { + _symbolicUbOfUb[i] = 1; + _symbolicLbOfUb[i] = -1; + } + else + { + _symbolicUbOfUb[i] = 1; + _symbolicLbOfUb[i] = 1; + } + + if ( lowerSignPhase == PHASE_NOT_FIXED ) + { + _symbolicUbOfLb[i] = 1; + _symbolicLbOfLb[i] = -1; + } + else + { + _symbolicUbOfLb[i] = -1; + _symbolicLbOfLb[i] = -1; + } + } + else + { + // The phase of this Sign is fixed! + double constant = ( signPhase == SIGN_PHASE_POSITIVE ) ? 1 : -1; + + _symbolicLbOfLb[i] = constant; + _symbolicUbOfLb[i] = constant; + _symbolicLbOfUb[i] = constant; + _symbolicUbOfUb[i] = constant; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] = 0; + _symbolicLb[j * _size + i] = 0; + } + + _symbolicLowerBias[i] = constant; + _symbolicUpperBias[i] = constant; + } + + if ( _symbolicLbOfLb[i] < -1 ) + _symbolicLbOfLb[i] = -1; + if ( _symbolicUbOfUb[i] > 1 ) + _symbolicUbOfUb[i] = 1; + + /* + We now have the tightest bounds we can for the sign + variable. If they are tigheter than what was previously + known, store them. 
+ */ + if ( receive ) + { + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } + } +} + +void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( const Vector &coeffs, + bool receive ) +{ + ASSERT( _alpha > 0 && _alpha < 1 ); + ASSERT( coeffs.size() == 1 ); + double coeff = coeffs[0]; + ASSERT( coeff >= 0 && coeff <= 1 ); + + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + { + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; + } + } + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + /* + There are two ways we can determine that a LeakyReLU has become fixed: + + 1. If the LeakyReLU's variable has been externally fixed + 2. lbLb >= 0 (ACTIVE) or ubUb <= 0 (INACTIVE) + */ + PhaseStatus leakyReluPhase = PHASE_NOT_FIXED; + + // Has the f variable been eliminated or fixed? + if ( FloatUtils::isPositive( _lb[i] ) ) + leakyReluPhase = RELU_PHASE_ACTIVE; + else if ( FloatUtils::isZero( _ub[i] ) ) + leakyReluPhase = RELU_PHASE_INACTIVE; + + ASSERT( _neuronToActivationSources.exists( i ) ); + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + /* + A LeakyReLU initially "inherits" the symbolic bounds computed + for its input variable + */ + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; + } + _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; + _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; + + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + + _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); + _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); + _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); + _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); + + // Has the b variable been fixed? 
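The unfixed-phase branch of `computeParameterisedSymbolicBoundsForLeakyRelu` below (for slope alpha < 1) uses the chord through the two endpoints as the upper bound and a single tunable slope for the lower bound. A sketch, with l < 0 < u the source neuron's concrete bounds and c short for `coeffs[0]` (notation local to this note):

```latex
% Parameterised LeakyReLU relaxation, slope alpha in (0, 1), b in [l, u],
% l < 0 < u, c in [0, 1]:
\[
  \mathrm{lrelu}(b) \;\le\; \frac{u - \alpha l}{u - l}\,(b - l) + \alpha l,
  \qquad
  \mathrm{lrelu}(b) \;\ge\; \bigl( (1 - \alpha)\, c + \alpha \bigr)\, b .
\]
% The upper bound is the chord through (l, alpha*l) and (u, u); the lower-bound
% slope varies continuously from alpha (c = 0) to 1 (c = 1), and any slope
% lambda in [alpha, 1] is sound since lrelu(b) >= lambda * b on [l, u].
```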
+ if ( !FloatUtils::isNegative( sourceLb ) ) + { + leakyReluPhase = RELU_PHASE_ACTIVE; + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + leakyReluPhase = RELU_PHASE_INACTIVE; + } + + if ( leakyReluPhase == PHASE_NOT_FIXED ) + { + // LeakyReLU not fixed + // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) + // Concrete upper bound: x_f <= ub_b + double width = sourceUb - sourceLb; + double weight = ( sourceUb - _alpha * sourceLb ) / width; + + if ( _alpha <= 1 ) + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] *= weight; + } + + // Do the same for the bias, and then adjust + _symbolicUpperBias[i] *= weight; + _symbolicUpperBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; + + + // For the lower bound, in general, x_f >= lambda * x_b, where + // 0 <= lambda <= 1, would be a sound lower bound. We + // use the heuristic described in section 4.1 of + // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + // to set the value of lambda (either 0 or 1 is considered). + + // lambda = ((1 - alpha) * coeff + alpha) (varies continuously between lambda = + // alpha and lambda = 1). Symbolic lower bound: x_f >= ((1 - alpha) * coeff + + // alpha) x_b Concrete lower bound: x_f >= 0 + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] *= ( 1 - _alpha ) * coeff + _alpha; + } + + _symbolicLowerBias[i] *= ( 1 - _alpha ) * coeff + _alpha; + } + + else + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] *= weight; + } + + // Do the same for the bias, and then adjust + _symbolicLowerBias[i] *= weight; + _symbolicLowerBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; + + // lambda = ((1 - alpha) * coeff + alpha) (varies continuously between lambda = + // alpha and lambda = 1). Symbolic lower bound: x_f >= ((1 - alpha) * coeff + + // alpha) x_b Concrete lower bound: x_f >= 0 + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] *= ( 1 - _alpha ) * coeff + _alpha; + } + + _symbolicUpperBias[i] *= ( 1 - _alpha ) * coeff + _alpha; + } + + /* + We now have the symbolic representation for the current + layer. Next, we compute new lower and upper bounds for + it. For each of these bounds, we compute an upper bound and + a lower bound. + */ + _symbolicLbOfLb[i] = _symbolicLowerBias[i]; + _symbolicUbOfLb[i] = _symbolicLowerBias[i]; + _symbolicLbOfUb[i] = _symbolicUpperBias[i]; + _symbolicUbOfUb[i] = _symbolicUpperBias[i]; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + double inputLb = _layerOwner->getLayer( 0 )->getLb( j ); + double inputUb = _layerOwner->getLayer( 0 )->getUb( j ); + + double entry = _symbolicLb[j * _size + i]; + + if ( entry >= 0 ) + { + _symbolicLbOfLb[i] += ( entry * inputLb ); + _symbolicUbOfLb[i] += ( entry * inputUb ); + } + else + { + _symbolicLbOfLb[i] += ( entry * inputUb ); + _symbolicUbOfLb[i] += ( entry * inputLb ); + } + + entry = _symbolicUb[j * _size + i]; + + if ( entry >= 0 ) + { + _symbolicLbOfUb[i] += ( entry * inputLb ); + _symbolicUbOfUb[i] += ( entry * inputUb ); + } + else + { + _symbolicLbOfUb[i] += ( entry * inputUb ); + _symbolicUbOfUb[i] += ( entry * inputLb ); + } + } + } + else + { + // The phase of this LeakyReLU is fixed! 
+ if ( leakyReluPhase == RELU_PHASE_ACTIVE ) + { + // Positive LeakyReLU, bounds are propagated as is + } + else + { + // Negative LeakyReLU, bounds are multiplied by _alpha + _symbolicLbOfLb[i] *= _alpha; + _symbolicUbOfLb[i] *= _alpha; + _symbolicLbOfUb[i] *= _alpha; + _symbolicUbOfUb[i] *= _alpha; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] *= _alpha; + _symbolicLb[j * _size + i] *= _alpha; + } + + _symbolicLowerBias[i] *= _alpha; + _symbolicUpperBias[i] *= _alpha; + } + } + + if ( _symbolicUbOfUb[i] > sourceUb ) + _symbolicUbOfUb[i] = sourceUb; + if ( _symbolicLbOfLb[i] < _alpha * sourceLb ) + _symbolicLbOfLb[i] = _alpha * sourceLb; + + /* + We now have the tightest bounds we can for the leakyRelu + variable. If they are tigheter than what was previously + known, store them. + */ + if ( receive ) + { + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } + } +} + +void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector &coeffs, + bool receive ) +{ + ASSERT( coeffs.size() == 2 ); + ASSERT( coeffs[0] >= 0 && coeffs[0] <= 1 ); + ASSERT( coeffs[1] >= 0 && coeffs[1] <= 1 ); + + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + { + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; + } + } + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + ASSERT( _neuronToActivationSources.exists( i ) ); + List sources = getActivationSources( i ); + ASSERT( sources.size() == 2 ); + + Vector sourceLbs; + Vector sourceUbs; + Vector sourceValues; + Vector sourceNeurons; + Vector sourceLayerSizes; + Vector sourceLayers; + bool allConstant = true; + for ( const auto &sourceIndex : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + unsigned sourceLayerSize = sourceLayer->getSize(); + + sourceLayers.append( sourceLayer ); + sourceNeurons.append( sourceNeuron ); + sourceLbs.append( sourceLb ); + sourceUbs.append( sourceUb ); + sourceLayerSizes.append( sourceLayerSize ); + + if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) + { + allConstant = false; + } + else + { + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + sourceValues.append( sourceValue ); + } + } + + if ( allConstant ) + { + // If the both source neurons have been eliminated, this neuron is constant + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] = 0; + _symbolicLb[j * _size + i] = 0; + } + + _symbolicUpperBias[i] = sourceValues[0] * sourceValues[1]; + _symbolicLowerBias[i] = sourceValues[0] * sourceValues[1]; + continue; + } + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] 
= 0; + _symbolicLb[j * _size + i] = 0; + } + + // Billinear linear relaxation (arXiv:2405.21063v2 [cs.LG]) + // Lower bound: out >= aLower * x + bLower * y + c_l, where + // aLower = alpha1 * l_y + ( 1 - alpha1 ) * u_y + // bLower = alpha1 * l_x + ( 1 - alpha1 ) * u_x + // cLower = -alpha1 * l_x * l_y - ( 1 - alpha1 ) * u_x * u_y + + // Upper bound: out <= aUpper * x + bUpper * y + c_u, where + // aUpper = alpha2 * u_y + ( 1 - alpha2 ) * l_y + // bUpper = alpha2 * l_x + ( 1 - alpha2 ) * u_x + // cUpper = -alpha2 * l_x * u_y - ( 1 - alpha2 ) * u_x * l_y + + double aLower = coeffs[0] * sourceLbs[1] + ( 1 - coeffs[0] ) * sourceUbs[1]; + double aUpper = coeffs[1] * sourceUbs[1] + ( 1 - coeffs[1] ) * sourceLbs[1]; + double bLower = coeffs[0] * sourceLbs[0] + ( 1 - coeffs[0] ) * sourceUbs[0]; + double bUpper = coeffs[1] * sourceLbs[0] + ( 1 - coeffs[1] ) * sourceUbs[0]; + + _symbolicLowerBias[i] = -coeffs[0] * sourceLbs[0] * sourceLbs[1] - + ( 1 - coeffs[0] ) * sourceUbs[0] * sourceUbs[1]; + _symbolicUpperBias[i] = -coeffs[1] * sourceLbs[0] * sourceUbs[1] - + ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1]; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + if ( aLower >= 0 ) + { + _symbolicLb[j * _size + i] += + aLower * ( sourceLayers[0] + ->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicLowerBias[i] += aLower * ( sourceLayers[0]->getSymbolicLowerBias() )[0]; + } + else + { + _symbolicLb[j * _size + i] += + aLower * ( sourceLayers[0] + ->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicLowerBias[i] += aLower * ( sourceLayers[0]->getSymbolicUpperBias() )[0]; + } + + if ( aUpper >= 0 ) + { + _symbolicUb[j * _size + i] += + aUpper * ( sourceLayers[0] + ->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicUpperBias[i] += aUpper * ( sourceLayers[0]->getSymbolicUpperBias() )[0]; + } + else + { + _symbolicUb[j * _size + i] += + aUpper * ( sourceLayers[0] + ->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicUpperBias[i] += aUpper * ( sourceLayers[0]->getSymbolicLowerBias() )[0]; + } + + if ( bLower >= 0 ) + { + _symbolicLb[j * _size + i] += + bLower * ( sourceLayers[1] + ->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bLower * ( sourceLayers[1]->getSymbolicLowerBias() )[1]; + } + else + { + _symbolicLb[j * _size + i] += + bLower * ( sourceLayers[1] + ->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bLower * ( sourceLayers[1]->getSymbolicUpperBias() )[1]; + } + + if ( bUpper >= 0 ) + { + _symbolicUb[j * _size + i] += + bUpper * ( sourceLayers[1] + ->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bUpper * ( sourceLayers[1]->getSymbolicUpperBias() )[1]; + } + else + { + _symbolicUb[j * _size + i] += + bUpper * ( sourceLayers[1] + ->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bUpper * ( sourceLayers[1]->getSymbolicLowerBias() )[1]; + } + } + + double lb = FloatUtils::infinity(); + double ub = FloatUtils::negativeInfinity(); + List values = { sourceLbs[0] * sourceLbs[1], + sourceLbs[0] * sourceUbs[1], + sourceUbs[0] * sourceLbs[1], + sourceUbs[0] * sourceUbs[1] }; + for ( const auto &v : values ) + { + if ( v < lb ) + lb = v; + if ( v > ub ) + ub = v; + } + + /* + We now have the symbolic representation for the current + layer. Next, we compute new lower and upper bounds for + it. 
For each of these bounds, we compute an upper bound and + a lower bound. + */ + _symbolicLbOfLb[i] = _symbolicLowerBias[i]; + _symbolicUbOfLb[i] = _symbolicLowerBias[i]; + _symbolicLbOfUb[i] = _symbolicUpperBias[i]; + _symbolicUbOfUb[i] = _symbolicUpperBias[i]; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + double inputLb = _layerOwner->getLayer( 0 )->getLb( j ); + double inputUb = _layerOwner->getLayer( 0 )->getUb( j ); + + double entry = _symbolicLb[j * _size + i]; + + if ( entry >= 0 ) + { + _symbolicLbOfLb[i] += ( entry * inputLb ); + _symbolicUbOfLb[i] += ( entry * inputUb ); + } + else + { + _symbolicLbOfLb[i] += ( entry * inputUb ); + _symbolicUbOfLb[i] += ( entry * inputLb ); + } + + entry = _symbolicUb[j * _size + i]; + + if ( entry >= 0 ) + { + _symbolicLbOfUb[i] += ( entry * inputLb ); + _symbolicUbOfUb[i] += ( entry * inputUb ); + } + else + { + _symbolicLbOfUb[i] += ( entry * inputUb ); + _symbolicUbOfUb[i] += ( entry * inputLb ); + } + } + + /* + We now have the tightest bounds we can for the relu + variable. If they are tigheter than what was previously + known, store them. + */ + if ( receive ) + { + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } + } +} + +const Vector Layer::OptimalParameterisedSymbolicBoundTightening() +{ + const Map &layers = _layerOwner->getLayerIndexToLayer(); + + // Search over coeffs in [0, 1]^number_of_parameters with projected grdient descent. + unsigned max_iterations = + GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; + double step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; + double epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS; + double weight_decay = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY; + double lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; + unsigned dimension = getNumberOfParameters( layers ); + bool maximize = false; + double sign = ( maximize ? 1 : -1 ); + + Vector lower_bounds( dimension, 0 ); + Vector upper_bounds( dimension, 1 ); + + // Initialize initial guess uniformly. 
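The loop that follows is plain projected gradient descent over [0, 1]^dimension, with the gradient estimated by one-sided finite differences of `EstimateVolume`. A self-contained sketch of the same scheme on a generic objective (the function and parameter names and the 1e-10 tolerance here are illustrative; the weight-decay term and the `maximize` flag of the real loop are omitted):

```cpp
#include <algorithm>
#include <cmath>
#include <functional>
#include <vector>

// Sketch: minimize f over the box [0, 1]^n by projected gradient descent,
// estimating each partial derivative with a one-sided finite difference.
std::vector<double> minimizeOverUnitBox( const std::function<double( const std::vector<double> & )> &f,
                                         std::vector<double> x, // initial guess, already inside [0, 1]^n
                                         unsigned maxIterations,
                                         double stepSize,
                                         double learningRate )
{
    for ( unsigned iter = 0; iter < maxIterations; ++iter )
    {
        double current = f( x );

        // Forward finite-difference gradient; coordinates whose probe would
        // leave the box are treated as flat.
        std::vector<double> grad( x.size(), 0.0 );
        for ( unsigned j = 0; j < x.size(); ++j )
        {
            std::vector<double> probe = x;
            probe[j] += stepSize;
            if ( probe[j] > 1.0 )
                continue;
            grad[j] = ( f( probe ) - current ) / stepSize;
        }

        // Stop once the estimated gradient is numerically zero.
        bool flat = true;
        for ( double g : grad )
            if ( std::abs( g ) > 1e-10 )
                flat = false;
        if ( flat )
            break;

        // Take a descent step and project back onto [0, 1]^n.
        for ( unsigned j = 0; j < x.size(); ++j )
            x[j] = std::min( 1.0, std::max( 0.0, x[j] - learningRate * grad[j] ) );
    }
    return x;
}
```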
+ Vector guess( dimension ); + std::mt19937_64 rng( GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED ); + std::uniform_real_distribution dis( 0, 1 ); + for ( unsigned j = 0; j < dimension; ++j ) + { + double lb = lower_bounds[j]; + double ub = upper_bounds[j]; + guess[j] = lb + dis( rng ) * ( ub - lb ); + } + + Vector> candidates( dimension ); + Vector gradient( dimension ); + + for ( unsigned i = 0; i < max_iterations; ++i ) + { + double current_cost = EstimateVolume( guess ); + for ( unsigned j = 0; j < dimension; ++j ) + { + candidates[j] = Vector( guess ); + candidates[j][j] += step_size; + + if ( candidates[j][j] > upper_bounds[j] || candidates[j][j] < lower_bounds[j] ) + { + gradient[j] = 0; + continue; + } + + double cost = EstimateVolume( candidates[j] ); + gradient[j] = ( cost - current_cost ) / step_size + weight_decay * guess[j]; + } + + bool gradient_is_zero = true; + for ( unsigned j = 0; j < dimension; ++j ) + { + if ( FloatUtils::abs( gradient[j] ) > epsilon ) + { + gradient_is_zero = false; + } + } + if ( gradient_is_zero ) + { + break; + } + + for ( unsigned j = 0; j < dimension; ++j ) + { + guess[j] += sign * lr * gradient[j]; + + guess[j] = std::min( guess[j], upper_bounds[j] ); + guess[j] = std::max( guess[j], lower_bounds[j] ); + } + } + + const Vector optimal_coeffs( guess ); + return optimal_coeffs; +} + +double Layer::EstimateVolume( const Vector &coeffs ) +{ + // First, run parameterised symbolic bound propagation. + const Map &layers = _layerOwner->getLayerIndexToLayer(); + Map> layerIndicesToParameters = + getParametersForLayers( layers, coeffs ); + for ( unsigned i = 0; i < layers.size(); ++i ) + { + ASSERT( layers.exists( i ) ); + const Vector ¤tLayerCoeffs = layerIndicesToParameters[i]; + layers[i]->computeParameterisedSymbolicBounds( currentLayerCoeffs ); + } + + std::mt19937_64 rng( GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED ); + double log_box_volume = 0; + double sigmoid_sum = 0; + + unsigned inputLayerIndex = 0; + unsigned outputLayerIndex = layers.size() - 1; + Layer *inputLayer = layers[inputLayerIndex]; + Layer *outputLayer = layers[outputLayerIndex]; + + // Calculate volume of input variables' bounding box. + for ( unsigned index = 0; index < inputLayer->getSize(); ++index ) + { + if ( inputLayer->neuronEliminated( index ) ) + continue; + + double lb = inputLayer->getLb( index ); + double ub = inputLayer->getUb( index ); + + if ( lb == ub ) + continue; + + log_box_volume += std::log( ub - lb ); + } + + for ( unsigned i = 0; i < GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; ++i ) + { + // Sample input point from known bounds. + Map point; + for ( unsigned j = 0; j < inputLayer->getSize(); ++j ) + { + if ( inputLayer->neuronEliminated( j ) ) + { + point.insert( j, 0 ); + } + else + { + double lb = inputLayer->getLb( j ); + double ub = inputLayer->getUb( j ); + std::uniform_real_distribution<> dis( lb, ub ); + point.insert( j, dis( rng ) ); + } + } + + // Calculate sigmoid of maximum margin from output symbolic bounds. 
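The quantity `EstimateVolume` assembles here is a Monte Carlo estimate of a softened preimage volume over the input box. As a sketch (notation local to this note): with B the input bounding box, x_1, ..., x_N uniform samples from B, and m(x) the largest per-output slack returned by `calculateDifferenceFromSymbolic`, the return value is

```latex
\[
  \widehat{V} \;=\; \mathrm{vol}(B) \cdot \frac{1}{N} \sum_{k=1}^{N} \sigma\bigl( m(x_k) \bigr),
  \qquad
  \sigma(t) = \frac{1}{1 + e^{-t}},
\]
% computed partly in log space, as exp( log vol(B) + log sum ) / N, with
% N = GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS.
```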
+ double max_margin = 0; + for ( unsigned j = 0; j < outputLayer->getSize(); ++j ) + { + if ( outputLayer->neuronEliminated( j ) ) + continue; + + double margin = outputLayer->calculateDifferenceFromSymbolic( point, j ); + max_margin = std::max( max_margin, margin ); + } + sigmoid_sum += SigmoidConstraint::sigmoid( max_margin ); + } + + return std::exp( log_box_volume + std::log( sigmoid_sum ) ) / + GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; +} + +double Layer::calculateDifferenceFromSymbolic( Map &point, unsigned i ) const +{ + double lowerSum = _symbolicLowerBias[i]; + double upperSum = _symbolicUpperBias[i]; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + lowerSum += _symbolicLb[j * _size + i] * point[j]; + upperSum += _symbolicUb[j * _size + i] * point[j]; + } + + return std::max( _ub[i] - upperSum, lowerSum - _lb[i] ); +} + + +unsigned Layer::getNumberOfParametersPerType( Type t ) const +{ + if ( t == Layer::RELU || t == Layer::LEAKY_RELU ) + return 1; + + if ( t == Layer::SIGN || t == Layer::BILINEAR ) + return 2; + + return 0; +} + +unsigned Layer::getNumberOfParameters( const Map &layers ) const +{ + unsigned index = 0; + for ( auto pair : layers ) + { + unsigned layerIndex = pair.first; + Layer *layer = layers[layerIndex]; + index += getNumberOfParametersPerType( layer->getLayerType() ); + } + return index; +} + +Map> Layer::getParametersForLayers( const Map &layers, + const Vector &coeffs ) const +{ + unsigned index = 0; + Map> layerIndicesToParameters; + for ( auto pair : layers ) + { + unsigned layerIndex = pair.first; + Layer *layer = layers[layerIndex]; + unsigned n_coeffs = getNumberOfParametersPerType( layer->getLayerType() ); + Vector current_coeffs( n_coeffs ); + for ( unsigned i = 0; i < n_coeffs; i++ ) + { + current_coeffs[i] = coeffs[index + i]; + } + layerIndicesToParameters.insert( layerIndex, current_coeffs ); + index += n_coeffs; + } + return layerIndicesToParameters; +} + +double Layer::softmaxLSELowerBound( const Vector &inputs, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) +{ + double sum = 0; + for ( unsigned j = 0; j < inputs.size(); ++j ) + { + double lj = inputLbs[j]; + double uj = inputUbs[j]; + double xj = inputs[j]; + sum += + ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); + } + + return std::exp( inputs[i] ) / sum; +} + +double Layer::softmaxdLSELowerBound( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) +{ + double val = 0; + if ( i == di ) + val += softmaxLSELowerBound( inputMids, inputLbs, inputUbs, i ); + + double ldi = inputLbs[di]; + double udi = inputUbs[di]; + + double sum = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + double lj = inputLbs[j]; + double uj = inputUbs[j]; + double xj = inputMids[j]; + + sum += + ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); + } + + val -= std::exp( inputMids[i] ) / ( sum * sum ) * ( std::exp( udi ) - std::exp( ldi ) ) / + ( udi - ldi ); + + return val; +} + +double Layer::softmaxLSELowerBound2( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) +{ + double max = FloatUtils::negativeInfinity(); + unsigned maxInputIndex = 0; + unsigned index = 0; + for ( const auto &mid : inputMids ) + { + if ( mid > max ) + { + max = mid; + maxInputIndex = index; + } + ++index; + } + + if ( maxInputIndex == i ) + return softmaxERLowerBound( inputMids, inputLbs, inputUbs, i ); + else + { + double 
sum = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + if ( j == maxInputIndex ) + sum += 1; + else + { + double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; + double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; + double xjjstar = inputMids[j] - inputMids[maxInputIndex]; + + sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + + ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); + } + } + + return std::exp( inputMids[i] - inputMids[maxInputIndex] ) / sum; + } +} + +double Layer::softmaxdLSELowerBound2( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) +{ + double max = FloatUtils::negativeInfinity(); + unsigned maxInputIndex = 0; + unsigned index = 0; + for ( const auto &mid : inputMids ) + { + if ( mid > max ) + { + max = mid; + maxInputIndex = index; + } + ++index; + } + + if ( maxInputIndex == i ) + return softmaxdERLowerBound( inputMids, inputLbs, inputUbs, i, di ); + else + { + double val = softmaxLSELowerBound2( inputMids, inputLbs, inputUbs, i ); + + double sum = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + if ( j == maxInputIndex ) + sum += 1; + else + { + double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; + double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; + double xjjstar = inputMids[j] - inputMids[maxInputIndex]; + sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + + ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); + } + } + double val2 = std::exp( inputMids[i] - inputMids[maxInputIndex] ) / ( sum * sum ); + + if ( i == di ) + { + double ldijstar = inputLbs[i] - inputUbs[maxInputIndex]; + double udijstar = inputUbs[i] - inputLbs[maxInputIndex]; + return val - + val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / ( udijstar - ldijstar ); + } + else if ( maxInputIndex == di ) + { + double sum2 = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + if ( j == maxInputIndex ) + continue; + else + { + double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; + double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; + sum2 += ( std::exp( ujjstar ) - std::exp( ljjstar ) ) / ( ujjstar - ljjstar ); + } + } + return -val + val2 * sum2; + } + else + { + double ldijstar = inputLbs[di] - inputUbs[maxInputIndex]; + double udijstar = inputUbs[di] - inputLbs[maxInputIndex]; + return -val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / + ( udijstar - ldijstar ); + } + } +} + +double Layer::softmaxLSEUpperBound( const Vector &inputs, + const Vector &outputLb, const Vector &outputUb, unsigned i ) { @@ -3676,7 +4804,6 @@ double Layer::softmaxdERUpperBound( const Vector &inputMids, double li = outputLb[i]; double ui = outputUb[i]; - if ( i == di ) { double val2 = -1; @@ -4006,7 +5133,6 @@ String Layer::typeToString( Type type ) return "BILINEAR"; break; - default: return "UNKNOWN TYPE"; break; diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index 900276eda3..889a42fdab 100644 --- a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -136,8 +136,24 @@ class Layer void obtainCurrentBounds( const Query &inputQuery ); void obtainCurrentBounds(); - void computeSymbolicBounds(); void computeIntervalArithmeticBounds(); + void computeSymbolicBounds(); + void computeParameterisedSymbolicBounds( const Vector &coeffs, bool receive = false ); + + + // Get number of optimizable parameters for parameterised SBT relaxation per layer type. 
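The parameter vector handled by the declarations below is flat and is sliced per layer in index order: ReLU and LeakyReLU layers consume one coefficient each, Sign and Bilinear layers two, and all other layer types none. For illustration (a hypothetical layer layout, not one of the networks in this diff): for layers 0..4 of types INPUT, WEIGHTED_SUM, RELU, WEIGHTED_SUM, SIGN, `getNumberOfParameters` returns 3, and `getParametersForLayers` maps a coefficient vector ( c0, c1, c2 ) to { 2 -> ( c0 ), 4 -> ( c1, c2 ) }, with empty vectors for the remaining layer indices.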
+ unsigned getNumberOfParametersPerType( Type t ) const; + + // Get total number of optimizable parameters for parameterised SBT relaxation. + unsigned getNumberOfParameters( const Map &layers ) const; + + // Get map containing vector of optimizable parameters for parameterised SBT relaxation for + // every layer index. + Map> getParametersForLayers( const Map &layers, + const Vector &coeffs ) const; + + // Return optimizable parameters which minimize parameterised SBT bounds' volume. + const Vector OptimalParameterisedSymbolicBoundTightening(); /* Preprocessing functionality: variable elimination and reindexing @@ -292,6 +308,24 @@ class Layer void computeSymbolicBoundsForBilinear(); void computeSymbolicBoundsDefault(); + + /* + Helper functions for parameterised symbolic bound tightening + */ + void computeParameterisedSymbolicBoundsForRelu( const Vector &coeffs, bool receive ); + void computeParameterisedSymbolicBoundsForSign( const Vector &coeffs, bool receive ); + void computeParameterisedSymbolicBoundsForLeakyRelu( const Vector &coeffs, + bool receive ); + void computeParameterisedSymbolicBoundsForBilinear( const Vector &coeffs, + bool receive ); + + // Estimate Volume of parameterised symbolic bound tightening. + double EstimateVolume( const Vector &coeffs ); + + // Return difference between given point and upper and lower bounds determined by parameterised + // SBT relaxation. + double calculateDifferenceFromSymbolic( Map &point, unsigned i ) const; + /* Helper functions for interval bound tightening */ diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index 8390a0190b..ed07a1d2d5 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -204,6 +204,17 @@ void NetworkLevelReasoner::symbolicBoundPropagation() _layerIndexToLayer[i]->computeSymbolicBounds(); } +void NetworkLevelReasoner::parameterisedSymbolicBoundPropagation( const Vector &coeffs ) +{ + Map> layerIndicesToParameters = + _layerIndexToLayer[0]->getParametersForLayers( _layerIndexToLayer, coeffs ); + for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) + { + const Vector ¤tLayerCoeffs = layerIndicesToParameters[i]; + _layerIndexToLayer[i]->computeParameterisedSymbolicBounds( currentLayerCoeffs ); + } +} + void NetworkLevelReasoner::deepPolyPropagation() { if ( _deepPolyAnalysis == nullptr ) @@ -221,6 +232,9 @@ void NetworkLevelReasoner::lpRelaxationPropagation() Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE ) lpFormulator.optimizeBoundsWithLpRelaxation( _layerIndexToLayer, true ); + else if ( Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX ) + lpFormulator.optimizeBoundsWithPreimageApproximation( _layerIndexToLayer ); else if ( Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::LP_RELAXATION ) lpFormulator.optimizeBoundsWithLpRelaxation( _layerIndexToLayer ); diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h index 2660795be6..03389789c4 100644 --- a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -103,6 +103,12 @@ class NetworkLevelReasoner : public LayerOwner bound on the upper bound of a ReLU node is negative, that ReLU is inactive and its output can be set to 0. + - Parametrised Symbolic: For certain activation functions, there + is a continuum of valid symbolic bounds. 
We receive a map of + coefficients in range [0, 1] for every layer index, then compute + the parameterised symbolic bounds (or default to regular + symbolic bounds if parameterised bounds not implemented). + - LP Relaxation: invoking an LP solver on a series of LP relaxations of the problem we're trying to solve, and optimizing the lower and upper bounds of each of the @@ -123,6 +129,7 @@ class NetworkLevelReasoner : public LayerOwner void obtainCurrentBounds(); void intervalArithmeticBoundPropagation(); void symbolicBoundPropagation(); + void parameterisedSymbolicBoundPropagation( const Vector &coeffs ); void deepPolyPropagation(); void lpRelaxationPropagation(); void LPTighteningForOneLayer( unsigned targetIndex ); diff --git a/src/nlr/tests/Test_LPRelaxation.h b/src/nlr/tests/Test_LPRelaxation.h index 7f293ec65f..a746f98607 100644 --- a/src/nlr/tests/Test_LPRelaxation.h +++ b/src/nlr/tests/Test_LPRelaxation.h @@ -73,9 +73,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addLayer( 4, NLR::Layer::RELU, 2 ); nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.getLayer( 2 )->setAlpha( 0.1 ); - nlr.getLayer( 4 )->setAlpha( 0.1 ); - // Mark layer dependencies for ( unsigned i = 1; i <= 5; ++i ) nlr.addLayerDependency( i - 1, i ); @@ -3978,7 +3975,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 1, 1 ); - // Invoke SBT + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -4487,25 +4484,1328 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - bool boundsEqual( const List &bounds, const List &expectedBounds ) + void test_preimage_approximation_relu() { - if ( bounds.size() != expectedBounds.size() ) - return false; + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); - bool allFound = true; - for ( const auto &bound : bounds ) - { - bool currentFound = false; - for ( const auto &expectedBound : expectedBounds ) - { - currentFound |= - ( bound._type == expectedBound._type && - bound._variable == expectedBound._variable && - FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) ); - } - allFound &= currentFound; - } - return allFound; + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), + 
Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 8, 0, Tightening::LB ), + + Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1.625, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), + + Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 8, 0, Tightening::LB ), + Tightening( 9, 0, Tightening::LB ), + + Tightening( 11, 0.8472, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( 
Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), + + Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), + Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), + + Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), + + Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), + + Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 12, -1.75, Tightening::LB ), + Tightening( 13, -4.25, Tightening::LB ), + Tightening( 13, 3.25, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + Tightening( 16, 3.25, Tightening::UB ), + + Tightening( 17, -11.1417, Tightening::LB ), + Tightening( 17, 10, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), + Tightening( 19, 10, Tightening::UB ), + + Tightening( 21, -17.3084, Tightening::LB ), + Tightening( 21, 3.2160, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + 
tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), + Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), + Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), + + Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), + Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), + Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), + + Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 11, -5, Tightening::LB ), + Tightening( 12, 
-4.6429, Tightening::LB ), + Tightening( 13, 8.5519, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + Tightening( 16, 8.5519, Tightening::UB ), + + Tightening( 17, -23.6231, Tightening::LB ), + Tightening( 17, 14.0909, Tightening::UB ), + Tightening( 18, 2, Tightening::LB ), + Tightening( 18, 28.2015, Tightening::UB ), + + Tightening( 20, 2, Tightening::LB ), + Tightening( 20, 28.2015, Tightening::UB ), + + Tightening( 21, -29.2015, Tightening::LB ), + Tightening( 21, 6.5734, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_leaky_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 4, -0.1, Tightening::LB ), + + Tightening( 7, -2, Tightening::LB ), + + Tightening( 8, -0.045, Tightening::LB ), + Tightening( 9, -0.2, Tightening::LB ), + + Tightening( 10, -1.8, Tightening::LB ), + Tightening( 10, 0, Tightening::UB ), + + Tightening( 11, 1.4542, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + 
tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 4, -0.5, Tightening::LB ), + Tightening( 5, -0.4, Tightening::LB ), + + Tightening( 6, -2, Tightening::LB ), + Tightening( 6, 3.1, Tightening::UB ), + Tightening( 7, -3.2, Tightening::LB ), + Tightening( 7, 4, Tightening::UB ), + + Tightening( 8, -0.2, Tightening::LB ), + Tightening( 8, 3.1, Tightening::UB ), + Tightening( 9, -0.32, Tightening::LB ), + Tightening( 9, 4, Tightening::UB ), + + Tightening( 10, -3.8726, Tightening::LB ), + Tightening( 10, 0.03, Tightening::UB ), + Tightening( 11, 0.4074, Tightening::LB ), + Tightening( 11, 11.3243, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_leaky_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyRelu2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, 
Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), + Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), + Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), + Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), + Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), + + Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), + Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), + + Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), + Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), + + Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 7, -0.2, Tightening::LB ), Tightening( 8, -0.3, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), Tightening( 10, -0.6, Tightening::LB ), + + Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 8.5, Tightening::UB ), + Tightening( 12, -2.225, Tightening::LB ), Tightening( 13, 2.975, Tightening::UB ), + Tightening( 13, -4.175, Tightening::LB ), + + Tightening( 14, -0.45, Tightening::LB ), Tightening( 14, 8.5, Tightening::UB ), + Tightening( 15, -0.2225, Tightening::LB ), Tightening( 16, 2.975, Tightening::UB ), + Tightening( 16, -0.4175, Tightening::LB ), + + Tightening( 17, -11.452, Tightening::LB ), Tightening( 17, 10.18, Tightening::UB ), + Tightening( 18, 0.87, Tightening::LB ), Tightening( 18, 16.0688, Tightening::UB ), + + Tightening( 19, -1.1452, Tightening::LB ), Tightening( 19, 10.18, Tightening::UB ), + Tightening( 20, 0.87, Tightening::LB ), Tightening( 20, 16.0688, Tightening::UB ), + + Tightening( 21, -17.0684, Tightening::LB ), Tightening( 21, 3.6767, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + 
tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), + Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), + Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), + Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), + Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), + + Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), + Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), + + Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), + Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), + + Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, -0.2, Tightening::LB ), Tightening( 8, -0.5, Tightening::LB ), + Tightening( 9, -0.6, Tightening::LB ), Tightening( 10, -1.5, Tightening::LB ), + + Tightening( 11, -5.6, Tightening::LB ), Tightening( 11, 16.4636, Tightening::UB ), + 
Tightening( 12, -6.0286, Tightening::LB ), Tightening( 13, -5.9, Tightening::LB ), + Tightening( 13, 8.0468, Tightening::UB ), + + Tightening( 14, -0.56, Tightening::LB ), Tightening( 14, 16.4636, Tightening::UB ), + Tightening( 15, -0.6029, Tightening::LB ), Tightening( 16, -0.59, Tightening::LB ), + Tightening( 16, 8.0468, Tightening::UB ), + + Tightening( 17, -24.8864, Tightening::LB ), Tightening( 17, 14.3076, Tightening::UB ), + Tightening( 18, 0.75, Tightening::LB ), Tightening( 18, 28.0272, Tightening::UB ), + + Tightening( 19, -2.4886, Tightening::LB ), Tightening( 19, 14.3076, Tightening::UB ), + Tightening( 20, 0.75, Tightening::LB ), Tightening( 20, 28.0272, Tightening::UB ), + + Tightening( 21, -29.9648, Tightening::LB ), Tightening( 21, 6.9619, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_sign() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + 
tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_sign2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, 
Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, 
Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_relu_and_bilinear() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ), + + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ), + + Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 10, 8.361, Tightening::UB ), + Tightening( 11, -8.361, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + 
bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), + + Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), + + Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + Tightening( 8, 7, Tightening::UB ), + + Tightening( 9, 5.8235, Tightening::UB ), + + Tightening( 10, -14, Tightening::LB ), + Tightening( 10, 40.7647, Tightening::UB ), + + Tightening( 11, -40.7647, Tightening::LB ), + Tightening( 11, 14, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_relu_and_bilinear2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + 
TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), + Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), + + Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 12, -1.75, Tightening::LB ), + + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + + Tightening( 15, -0.0001, Tightening::LB ), + Tightening( 15, 78.8787, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, 
Tightening::LB ), Tightening( 6, 7, Tightening::UB ),
+
+            Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ),
+            Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ),
+            Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ),
+            Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ),
+
+            Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ),
+            Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ),
+
+            Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ),
+            Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ),
+
+            Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) );
+
+
+        // Invoke PreimageApproximation
+        TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) );
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() );
+        TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() );
+
+        List<Tightening> expectedBounds4( {
+            Tightening( 7, 0, Tightening::LB ),
+
+            Tightening( 11, -5, Tightening::LB ),
+            Tightening( 12, -4.6429, Tightening::LB ),
+
+            Tightening( 13, 0, Tightening::LB ),
+            Tightening( 14, 0, Tightening::LB ),
+
+            Tightening( 15, 0, Tightening::LB ),
+            Tightening( 15, 211.0082, Tightening::UB ),
+        } );
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) );
+        bounds = removeRedundancies( bounds, newBounds );
+        TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );
+    }
+
+    bool boundsEqual( const List<Tightening> &bounds, const List<Tightening> &expectedBounds )
+    {
+        if ( bounds.size() < expectedBounds.size() )
+            return false;
+
+        bool allFound = true;
+        for ( const auto &bound : bounds )
+        {
+            bool currentFound = false;
+            for ( const auto &expectedBound : expectedBounds )
+            {
+                currentFound |=
+                    ( bound._type == expectedBound._type &&
+                      bound._variable == expectedBound._variable &&
+                      FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) );
+            }
+            allFound &= currentFound;
+        }
+        return allFound;
+    }
+
+    // Create a list of all tightenings in newBounds for which there is no bound, in newBounds or
+    // in bounds, that is at least as tight.
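+    // Bound comparisons below use FloatUtils with the same 0.0001 tolerance as boundsEqual above.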
+    List<Tightening> removeRedundancies( const List<Tightening> &bounds,
+                                         const List<Tightening> &newBounds )
+    {
+        List<Tightening> minimalBounds;
+
+        for ( const auto &newBound : newBounds )
+        {
+            bool foundTighter = false;
+            for ( const auto &bound : bounds )
+            {
+                foundTighter |=
+                    ( newBound._type == bound._type && newBound._variable == bound._variable &&
+                      ( ( newBound._type == Tightening::LB &&
+                          FloatUtils::lte( newBound._value, bound._value, 0.0001 ) ) ||
+                        ( newBound._type == Tightening::UB &&
+                          FloatUtils::gte( newBound._value, bound._value, 0.0001 ) ) ) );
+            }
+
+            for ( const auto &bound : newBounds )
+            {
+                foundTighter |=
+                    ( newBound._type == bound._type && newBound._variable == bound._variable &&
+                      ( ( newBound._type == Tightening::LB &&
+                          FloatUtils::lt( newBound._value, bound._value, 0.0001 ) ) ||
+                        ( newBound._type == Tightening::UB &&
+                          FloatUtils::gt( newBound._value, bound._value, 0.0001 ) ) ) );
+            }
+
+            if ( !foundTighter )
+                minimalBounds.append( newBound );
+        }
+        return minimalBounds;
    }

    void updateTableau( MockTableau &tableau, List<Tightening> &tightenings )
diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h
index b74cd3931c..e422c14183 100644
--- a/src/nlr/tests/Test_NetworkLevelReasoner.h
+++ b/src/nlr/tests/Test_NetworkLevelReasoner.h
@@ -1641,7 +1641,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
      x6 x7 x8 = softmax(x3, x4, x5)
      x9 = x6 + x7 + x8
-     x10 = x6 + x7 + x8
+     x10 = - x6 - x7 - x8
    */
@@ -6041,7 +6041,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
      Layer 2 (with residual from x0):
-     x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1]
+     x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2]
      x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5]
      x3 range: [-1, 2.5]
@@ -6057,7 +6057,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
      x5.lb = 3 ( 0 ) + 3 ( x0 ) + 1 = 3x0 + 1 : [-2, 4]
      x5.ub = 3 ( -15/14 x0 + 20/14 ) + 3 ( x0 ) + 1 = -3/14 x0 + 74/14 : [71/14, 77/14 = 5.5]
-     x5 range: [-2, 4]
+     x5 range: [-2, 5.5]
    */
@@ -6113,7 +6113,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
      Layer 2 (with residual from x0):
-     x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1]
+     x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2]
      x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5]
      x3 range: [-1, 2.5]
@@ -6372,6 +6372,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
        TS_ASSERT( boundsEqual( bounds, expectedBounds ) );
    }
+
    void test_sbt_abs_positive_and_negative()
    {
        Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" );
@@ -7461,20 +7462,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
    */

        List<Tightening> expectedBounds(
-            { Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
-              Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
-              Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ),
-              Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ),
-              Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ),
-              Tightening( 8, 0.8668, Tightening::LB ), Tightening( 8, 0.8668, Tightening::UB ),
-              Tightening( 9, 0.9820, Tightening::LB ), Tightening( 9, 0.9820, Tightening::UB ),
-              Tightening( 10, 0.1173, Tightening::LB ), Tightening( 10, 0.1173, Tightening::UB ),
-              Tightening( 11, 0.0179, Tightening::LB ), Tightening( 11, 0.0179, Tightening::UB ),
-              Tightening( 12, 0.0159, Tightening::LB ), Tightening( 12, 0.0159, Tightening::UB ),
-              Tightening( 13, 0.9470, Tightening::LB ), Tightening( 13, 0.9470, Tightening::UB ),
-              Tightening( 14, -0.9470, Tightening::LB ), Tightening( 14, -0.9470, Tightening::UB ),
-              Tightening( 15, 1.0253, Tightening::LB ), Tightening( 15, 1.0253, Tightening::UB ),
-              Tightening( 16, -1.0253, Tightening::LB ), Tightening( 16, -1.0253, Tightening::UB )
+            { Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ),
+              Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ),
+              Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ),
+              Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ),
+              Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ),
+              Tightening( 8, 0.8668, Tightening::LB ), Tightening( 8, 0.8668, Tightening::UB ),
+              Tightening( 9, 0.9820, Tightening::LB ), Tightening( 9, 0.9820, Tightening::UB ),
+              Tightening( 10, 0.1173, Tightening::LB ), Tightening( 10, 0.1173, Tightening::UB ),
+              Tightening( 11, 0.0179, Tightening::LB ), Tightening( 11, 0.0179, Tightening::UB ),
+              Tightening( 12, 0.0159, Tightening::LB ), Tightening( 12, 0.0159, Tightening::UB ),
+              Tightening( 13, 1, Tightening::LB ), Tightening( 13, 1, Tightening::UB ),
+              Tightening( 14, -1, Tightening::LB ), Tightening( 14, -1, Tightening::UB ),
+              Tightening( 15, 1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ),
+              Tightening( 16, -1, Tightening::LB ), Tightening( 16, -1, Tightening::UB )
            } );
@@ -7577,9 +7578,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
      Layer 3:
-     x7.lb = -1 ( 2x0 - 5x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 0]
-     x7.ub = -1 ( -2x0 + 3x1 - 1 ) = 2x0 + x1 + 1 : [2, 7]
-     x4 range: [-21, 5]
+     x5.lb = -1 ( 2x0 - 5x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 0]
+     x5.ub = -1 ( -2x0 + 3x1 - 1 ) = 2x0 + x1 + 1 : [2, 7]
+     x5 range: [-21, 7]
    */
        List<Tightening> expectedBounds( { Tightening( 2, -1, Tightening::LB ),