Benchmark the reliability solvers on the problems

In this example, we show how to run all the reliability methods on all the benchmark problems and collect the computed failure probabilities.

import openturns as ot
import numpy as np
import otbenchmark as otb
import pandas as pd
from tqdm import tqdm
ot.Log.Show(ot.Log.NONE)

We retrieve the list of reliability problems.

benchmarkProblemList = otb.ReliabilityBenchmarkProblemList()
numberOfProblems = len(benchmarkProblemList)
numberOfProblems
26

For each problem in the benchmark, print the problem name and the exact failure probability.

for i in range(numberOfProblems):
    problem = benchmarkProblemList[i]
    name = problem.getName()
    pf = problem.getProbability()
    print("#", i, " : ", name, ", exact PF : ", pf)
# 0  :  RP8 , exact PF :  0.0007897927545597477
# 1  :  RP14 , exact PF :  0.00077285
# 2  :  RP22 , exact PF :  0.004207305511299618
# 3  :  RP24 , exact PF :  0.00286
# 4  :  RP25 , exact PF :  4.148566293759747e-05
# 5  :  RP28 , exact PF :  1.4532945550025393e-07
# 6  :  RP31 , exact PF :  0.003226681209587691
# 7  :  RP33 , exact PF :  0.00257
# 8  :  RP35 , exact PF :  0.00347894632
# 9  :  RP38 , exact PF :  0.0081
# 10  :  RP53 , exact PF :  0.0313
# 11  :  RP55 , exact PF :  0.5600144282863704
# 12  :  RP54 , exact PF :  0.000998
# 13  :  RP57 , exact PF :  0.0284
# 14  :  RP75 , exact PF :  0.00981929872154689
# 15  :  RP89 , exact PF :  0.00543
# 16  :  RP107 , exact PF :  2.92e-07
# 17  :  RP110 , exact PF :  3.19e-05
# 18  :  RP111 , exact PF :  7.65e-07
# 19  :  RP63 , exact PF :  0.000379
# 20  :  RP91 , exact PF :  0.000697
# 21  :  RP60 , exact PF :  0.0456
# 22  :  RP77 , exact PF :  2.87e-07
# 23  :  Four-branch serial system , exact PF :  0.0022227950661944398
# 24  :  R-S , exact PF :  0.07864960352514257
# 25  :  Axial stressed beam , exact PF :  0.02919819462483095
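
The names and exact probabilities can also be gathered into a DataFrame for later reference. This is a convenience sketch that only uses the accessors shown above:

problem_summary = pd.DataFrame(
    {
        "name": [p.getName() for p in benchmarkProblemList],
        "exact_pf": [p.getProbability() for p in benchmarkProblemList],
    }
)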

Run several algorithms on a single problem

We start by running several algorithms on a single problem. We first set the parameters of the optimization algorithm used to search for the design point, then select the problem.

maximumEvaluationNumber = 1000
maximumAbsoluteError = 1.0e-3
maximumRelativeError = 1.0e-3
maximumResidualError = 1.0e-3
maximumConstraintError = 1.0e-3
nearestPointAlgorithm = ot.AbdoRackwitz()
nearestPointAlgorithm.setMaximumCallsNumber(maximumEvaluationNumber)
nearestPointAlgorithm.setMaximumAbsoluteError(maximumAbsoluteError)
nearestPointAlgorithm.setMaximumRelativeError(maximumRelativeError)
nearestPointAlgorithm.setMaximumResidualError(maximumResidualError)
nearestPointAlgorithm.setMaximumConstraintError(maximumConstraintError)
i = 3  # select problem #3, i.e. RP24 (see the list above)
problem = benchmarkProblemList[i]
metaAlgorithm = otb.ReliabilityBenchmarkMetaAlgorithm(problem)

We try the FORM algorithm.

benchmarkFORM = metaAlgorithm.runFORM(nearestPointAlgorithm)
s1 = benchmarkFORM.summary()
print(s1)
computedProbability = 0.006209245091320793
exactProbability = 0.00286
absoluteError = 0.003349245091320793
numberOfCorrectDigits = 0.0
numberOfFunctionEvaluations = 6
numberOfDigitsPerEvaluation = 0.0
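
These summary fields are mutually consistent: the absolute error is the distance between the computed and exact probabilities, and the number of correct digits behaves like minus the base-10 logarithm of the relative error, clipped at zero. A sketch of these relationships (not the library's internal code):

computed = benchmarkFORM.computedProbability
exact = benchmarkFORM.exactProbability
absoluteError = abs(computed - exact)  # 3.349e-3, as printed above
relativeError = absoluteError / abs(exact)  # about 1.17: a relative error above 100%
correctDigits = max(0.0, -np.log10(relativeError))  # hence clipped to 0.0
digitsPerEvaluation = correctDigits / benchmarkFORM.numberOfFunctionEvaluations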

Then the SORM algorithm.

benchmarkSORM = metaAlgorithm.runSORM(nearestPointAlgorithm)
s2 = benchmarkSORM.summary()
print(s2)
computedProbability = 0.006209245091320793
exactProbability = 0.00286
absoluteError = 0.003349245091320793
numberOfCorrectDigits = 0.0
numberOfFunctionEvaluations = 6
numberOfDigitsPerEvaluation = 0.0

Then the Monte Carlo method, with a target coefficient of variation of 10%.

benchmarkMC = metaAlgorithm.runMonteCarlo(
    maximumOuterSampling=1000000, coefficientOfVariation=0.1, blockSize=1,
)
s3 = benchmarkMC.summary()
print(s3)
computedProbability = 0.002907145764288631
exactProbability = 0.00286
absoluteError = 4.714576428863082e-05
numberOfCorrectDigits = 1.7829233525712238
numberOfFunctionEvaluations = 34398
numberOfDigitsPerEvaluation = 5.183218072478702e-05
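
The number of Monte Carlo function evaluations is driven by the coefficient of variation stopping criterion: the coefficient of variation of a Monte Carlo estimate based on n evaluations is sqrt((1 - pf) / (n * pf)), so we can predict the budget required to reach the 10% target. A quick sanity check:

# Predicted budget to reach a 10% coefficient of variation at pf = 0.00286.
pf = 0.00286
targetCoV = 0.1
predictedEvaluations = (1.0 - pf) / (pf * targetCoV ** 2)
print(predictedEvaluations)  # about 3.49e4, consistent with the 34398 calls above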

Then FORM combined with importance sampling around the design point.

benchmarkFORMIS = metaAlgorithm.runFORMImportanceSampling(
    nearestPointAlgorithm,
    maximumOuterSampling=1000,
    coefficientOfVariation=0.1,
    blockSize=1,
)
s4 = benchmarkFORMIS.summary()
print(s4)
computedProbability = 0.0028628304759341157
exactProbability = 0.00286
absoluteError = 2.8304759341155755e-06
numberOfCorrectDigits = 3.004506566445169
numberOfFunctionEvaluations = 636
numberOfDigitsPerEvaluation = 0.004724066928372907

Finally, the subset sampling method.

benchmarkSS = metaAlgorithm.runSubsetSampling(
    maximumOuterSampling=5000, coefficientOfVariation=0.1, blockSize=1,
)
s5 = benchmarkSS.summary()
print(s5)
computedProbability = 0.0028660000000000035
exactProbability = 0.00286
absoluteError = 6.0000000000033984e-06
numberOfCorrectDigits = 2.6782147827451532
numberOfFunctionEvaluations = 15000
numberOfDigitsPerEvaluation = 0.00017854765218301022
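
The subset sampling run reports 15000 function evaluations with a per-step budget of 5000. Assuming each subset step consumes the full maximumOuterSampling * blockSize budget (the exact stopping behavior is internal to the algorithm), this suggests three subset steps were performed:

budgetPerStep = 5000 * 1  # maximumOuterSampling * blockSize
print(15000 / budgetPerStep)  # 3.0 steps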

Run all algorithms on all problems and produce a single result table

For each algorithm and each reliability problem, we estimate the failure probability and gather all estimates into a single table.

We create a list of problem names.

problem_names = []
for i in range(numberOfProblems):
    problem = benchmarkProblemList[i]
    name = problem.getName()
    problem_names.append(name)
metrics = [
    "Exact",
    "FORM",
    "SORM",
    "Monte Carlo",
    "FORM-IS",
    "Subset",
]
results = np.zeros((numberOfProblems, len(metrics)))
maximumOuterSampling = 10 ** 2
blockSize = 10 ** 2
coefficientOfVariation = 0.0
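
Setting the target coefficient of variation to zero disables the accuracy-based stopping criterion, so each sampling algorithm runs until its full budget is spent:

# With a zero coefficient of variation target, each simulation uses the
# full budget of maximumOuterSampling * blockSize function evaluations.
print(maximumOuterSampling * blockSize)  # 10000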

for i in tqdm(range(numberOfProblems)):
    problem = benchmarkProblemList[i]
    results[i][0] = problem.getProbability()
    metaAlgorithm = otb.ReliabilityBenchmarkMetaAlgorithm(problem)
    benchmarkResult = metaAlgorithm.runFORM(nearestPointAlgorithm)
    results[i][1] = benchmarkResult.computedProbability
    benchmarkResult = metaAlgorithm.runSORM(nearestPointAlgorithm)
    results[i][2] = benchmarkResult.computedProbability
    benchmarkResult = metaAlgorithm.runMonteCarlo(
        maximumOuterSampling=maximumOuterSampling,
        coefficientOfVariation=coefficientOfVariation,
        blockSize=blockSize,
    )
    results[i][3] = benchmarkResult.computedProbability
    benchmarkResult = metaAlgorithm.runFORMImportanceSampling(
        nearestPointAlgorithm,
        maximumOuterSampling=maximumOuterSampling,
        coefficientOfVariation=coefficientOfVariation,
        blockSize=blockSize,
    )
    results[i][4] = benchmarkResult.computedProbability
    benchmarkResult = metaAlgorithm.runSubsetSampling(
        maximumOuterSampling=maximumOuterSampling,
        coefficientOfVariation=coefficientOfVariation,
        blockSize=blockSize,
    )
    results[i][5] = benchmarkResult.computedProbability

df = pd.DataFrame(results, index=problem_names, columns=metrics)
# df.to_csv("reliability_benchmark_table-output.csv")
df
Exact FORM SORM Monte Carlo FORM-IS Subset
RP8 7.897928e-04 6.598878e-04 7.838036e-04 0.0007 8.047663e-04 9.582000e-04
RP14 7.728500e-04 7.003011e-04 6.995436e-04 0.0008 7.726593e-04 8.055000e-04
RP22 4.207306e-03 6.209672e-03 4.390902e-03 0.0033 4.165796e-03 3.880000e-03
RP24 2.860000e-03 6.209245e-03 6.209245e-03 0.0024 2.904780e-03 2.427570e-03
RP25 4.148566e-05 0.000000e+00 0.000000e+00 0.0001 0.000000e+00 3.803378e-05
RP28 1.453295e-07 2.850470e-08 0.000000e+00 0.0000 1.326533e-07 9.320331e-08
RP31 3.226681e-03 2.275013e-02 2.275013e-02 0.0040 3.278085e-03 3.046000e-03
RP33 2.570000e-03 1.349898e-03 1.349898e-03 0.0024 2.510471e-03 2.859000e-03
RP35 3.478946e-03 1.349898e-03 2.134376e-03 0.0041 2.359143e-03 3.822000e-03
RP38 8.100000e-03 7.902212e-03 8.029356e-03 0.0091 8.125416e-03 8.041000e-03
RP53 3.130000e-02 1.180398e-01 2.986164e-02 0.0272 3.252063e-02 3.216000e-02
RP55 5.600144e-01 0.000000e+00 0.000000e+00 0.5589 0.000000e+00 5.540000e-01
RP54 9.980000e-04 5.555704e-02 3.554811e-03 0.0010 1.101458e-03 9.236000e-04
RP57 2.840000e-02 0.000000e+00 0.000000e+00 0.0287 0.000000e+00 2.765000e-02
RP75 9.819299e-03 0.000000e+00 0.000000e+00 0.0072 0.000000e+00 1.003000e-02
RP89 5.430000e-03 2.008594e-09 2.008594e-09 0.0060 7.174867e-05 5.275000e-03
RP107 2.920000e-07 2.866516e-07 2.866516e-07 0.0000 2.929008e-07 3.194601e-07
RP110 3.190000e-05 3.167124e-05 3.167124e-05 0.0000 3.113050e-05 2.391606e-05
RP111 7.650000e-07 0.000000e+00 0.000000e+00 0.0000 0.000000e+00 7.775217e-07
RP63 3.790000e-04 9.999966e-01 0.000000e+00 0.0005 0.000000e+00 3.214000e-04
RP91 6.970000e-04 6.994296e-04 7.011592e-04 0.0010 7.132322e-04 6.945000e-04
RP60 4.560000e-02 4.483968e-02 4.483968e-02 0.0466 4.560900e-02 4.715000e-02
RP77 2.870000e-07 0.000000e+00 0.000000e+00 0.0000 0.000000e+00 2.329994e-07
Four-branch serial system 2.222795e-03 0.000000e+00 0.000000e+00 0.0027 0.000000e+00 2.423000e-03
R-S 7.864960e-02 7.864960e-02 7.864960e-02 0.0825 7.773510e-02 7.925000e-02
Axial stressed beam 2.919819e-02 2.998280e-02 2.933256e-02 0.0298 2.906722e-02 3.011000e-02
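
From this table we can, for example, measure how far each method is from the exact probability. A minimal pandas sketch that computes the relative error of each estimator (column names as defined in metrics above):

# Relative error of each estimator with respect to the exact probability.
relative_errors = (
    df[metrics[1:]].sub(df["Exact"], axis=0).abs().div(df["Exact"], axis=0)
)
relative_errors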


Run several algorithms on all problems and get detailed statistics

We run several algorithms on each reliability benchmark problem and print detailed statistics for each one.

def FormatRow(benchmarkResult):
    """Format a single row of the benchmark table"""
    result = [
        benchmarkResult.exactProbability,
        benchmarkResult.computedProbability,
        benchmarkResult.absoluteError,
        benchmarkResult.numberOfCorrectDigits,
        benchmarkResult.numberOfFunctionEvaluations,
        benchmarkResult.numberOfDigitsPerEvaluation,
    ]
    return result
method_names = ["Monte-Carlo", "FORM", "SORM", "FORM-IS", "SUBSET"]

maximumOuterSampling = 10 ** 2
blockSize = 10 ** 2
coefficientOfVariation = 0.0

result = dict()
for i in range(numberOfProblems):
    problem = benchmarkProblemList[i]
    name = problem_names[i]
    exact_pf_name = "%10s" % ("Exact PF " + name[0:10])
    metrics = [
        exact_pf_name,
        "Estimated PF",
        "Absolute Error",
        "Correct Digits",
        "Function Calls",
        "Digits / Evaluation",
    ]
    results = np.zeros((len(method_names), len(metrics)))
    metaAlgorithm = otb.ReliabilityBenchmarkMetaAlgorithm(problem)
    # Monte-Carlo
    benchmarkResult = metaAlgorithm.runMonteCarlo(
        maximumOuterSampling=maximumOuterSampling,
        coefficientOfVariation=coefficientOfVariation,
        blockSize=blockSize,
    )
    results[0, :] = FormatRow(benchmarkResult)
    # FORM
    benchmarkResult = metaAlgorithm.runFORM(nearestPointAlgorithm)
    results[1, :] = FormatRow(benchmarkResult)
    # SORM
    benchmarkResult = metaAlgorithm.runSORM(nearestPointAlgorithm)
    results[2, :] = FormatRow(benchmarkResult)
    # FORM-IS
    benchmarkResult = metaAlgorithm.runFORMImportanceSampling(
        nearestPointAlgorithm,
        maximumOuterSampling=maximumOuterSampling,
        coefficientOfVariation=coefficientOfVariation,
        blockSize=blockSize,
    )
    results[3, :] = FormatRow(benchmarkResult)
    # Subset
    benchmarkResult = metaAlgorithm.runSubsetSampling(
        maximumOuterSampling=maximumOuterSampling,
        coefficientOfVariation=coefficientOfVariation,
        blockSize=blockSize,
    )
    results[4, :] = FormatRow(benchmarkResult)
    # Gather statistics and print them
    df = pd.DataFrame(results, index=method_names, columns=metrics)
    # Format the columns for readability
    s = df.style.format(
        {
            exact_pf_name: lambda x: "{:.3e}".format(x),
            "Estimated PF": lambda x: "{:.3e}".format(x),
            "Absolute Error": lambda x: "{:.3e}".format(x),
            "Correct Digits": lambda x: "{:.1f}".format(x),
            "Function Calls": lambda x: "{:d}".format(int(x)),
            "Digits / Evaluation": lambda x: "{:.1f}".format(x),
        }
    )
    result[name] = s
result["RP33"]
  Exact PF RP33 Estimated PF Absolute Error Correct Digits Function Calls Digits / Evaluation
Monte-Carlo 2.570e-03 1.500e-03 1.070e-03 0.4 10000 0.0
FORM 2.570e-03 1.350e-03 1.220e-03 0.3 26 0.0
SORM 2.570e-03 1.350e-03 1.220e-03 0.3 51 0.0
FORM-IS 2.570e-03 2.709e-03 1.393e-04 1.3 10026 0.0
SUBSET 2.570e-03 2.491e-03 7.900e-05 1.5 30000 0.0


result["RP35"]
  Exact PF RP35 Estimated PF Absolute Error Correct Digits Function Calls Digits / Evaluation
Monte-Carlo 3.479e-03 4.300e-03 8.211e-04 0.6 10000 0.0
FORM 3.479e-03 1.350e-03 2.129e-03 0.2 20 0.0
SORM 3.479e-03 2.134e-03 1.345e-03 0.4 33 0.0
FORM-IS 3.479e-03 2.407e-03 1.072e-03 0.5 10020 0.0
SUBSET 3.479e-03 3.877e-03 3.981e-04 0.9 30000 0.0
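
Each entry of result is a pandas Styler, so the per-problem tables can also be exported. A sketch, assuming a pandas version where Styler.to_html is available (the file names are illustrative):

for name, styler in result.items():
    with open("reliability_statistics_%s.html" % name, "w") as f:
        f.write(styler.to_html())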


Total running time of the script: (0 minutes 14.334 seconds)