Benchmark the reliability solvers on the problems
In this example, we show how to run all the reliability methods on all the benchmark problems and compare the computed failure probabilities.
import openturns as ot
import numpy as np
import otbenchmark as otb
import pandas as pd
from tqdm import tqdm
ot.Log.Show(ot.Log.NONE)
We retrieve the list of reliability benchmark problems.
benchmarkProblemList = otb.ReliabilityBenchmarkProblemList()
numberOfProblems = len(benchmarkProblemList)
numberOfProblems
26
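Each item of the list is a reliability benchmark problem. Besides the name and exact probability used below, a problem is assumed here to expose the OpenTURNS event that defines failure through a getEvent() accessor; the quick inspection below is a sketch based on that assumption.
# Quick look at the first problem (assumes getEvent() returns the ThresholdEvent
# that defines the failure region)
problem = benchmarkProblemList[0]
print("Name :", problem.getName())
print("Exact PF :", problem.getProbability())
event = problem.getEvent()
print("Threshold :", event.getThreshold())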
For each problem in the benchmark, print the problem name and the exact failure probability.
for i in range(numberOfProblems):
    problem = benchmarkProblemList[i]
    name = problem.getName()
    pf = problem.getProbability()
    print("#", i, " : ", name, ", exact PF : ", pf)
# 0 : RP8 , exact PF : 0.0007897927545598118
# 1 : RP14 , exact PF : 0.00077285
# 2 : RP22 , exact PF : 0.004207305511299618
# 3 : RP24 , exact PF : 0.00286
# 4 : RP25 , exact PF : 4.148566293759747e-05
# 5 : RP28 , exact PF : 1.4532945550025393e-07
# 6 : RP31 , exact PF : 0.003226681209587691
# 7 : RP33 , exact PF : 0.00257
# 8 : RP35 , exact PF : 0.00347894632
# 9 : RP38 , exact PF : 0.0081
# 10 : RP53 , exact PF : 0.0313
# 11 : RP55 , exact PF : 0.5600144282863704
# 12 : RP54 , exact PF : 0.000998
# 13 : RP57 , exact PF : 0.0284
# 14 : RP75 , exact PF : 0.00981929872154689
# 15 : RP89 , exact PF : 0.00543
# 16 : RP107 , exact PF : 2.92e-07
# 17 : RP110 , exact PF : 3.19e-05
# 18 : RP111 , exact PF : 7.65e-07
# 19 : RP63 , exact PF : 0.000379
# 20 : RP91 , exact PF : 0.000697
# 21 : RP60 , exact PF : 0.0456
# 22 : RP77 , exact PF : 2.87e-07
# 23 : Four-branch serial system , exact PF : 0.0022227950661944398
# 24 : R-S , exact PF : 0.07864960352514257
# 25 : Axial stressed beam , exact PF : 0.02919819462483095
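The exact probabilities span several orders of magnitude, from about 1e-7 up to more than 0.5. As a small illustration using only the accessors shown above, the problems can be sorted by exact probability to identify the rarest events:
# Sort the benchmark problems by exact failure probability (rarest events first)
sortedProblems = sorted(benchmarkProblemList, key=lambda p: p.getProbability())
for problem in sortedProblems[:3]:
    print(problem.getName(), ":", problem.getProbability())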
Run several algorithms on a single problem
We want to run several algorithms on a single problem. First, we set the parameters of the nearest-point optimization algorithm used by the FORM and SORM methods.
maximumEvaluationNumber = 1000
maximumAbsoluteError = 1.0e-3
maximumRelativeError = 1.0e-3
maximumResidualError = 1.0e-3
maximumConstraintError = 1.0e-3
nearestPointAlgorithm = ot.AbdoRackwitz()
nearestPointAlgorithm.setMaximumCallsNumber(maximumEvaluationNumber)
nearestPointAlgorithm.setMaximumAbsoluteError(maximumAbsoluteError)
nearestPointAlgorithm.setMaximumRelativeError(maximumRelativeError)
nearestPointAlgorithm.setMaximumResidualError(maximumResidualError)
nearestPointAlgorithm.setMaximumConstraintError(maximumConstraintError)
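The nearest-point search is a regular OpenTURNS optimization algorithm, so another solver with the same interface could be used instead, for instance the derivative-free Cobyla solver. This is a sketch; the variable alternativeNearestPointAlgorithm is ours and is not used below.
# Sketch: a derivative-free alternative to AbdoRackwitz for the nearest-point search
alternativeNearestPointAlgorithm = ot.Cobyla()
alternativeNearestPointAlgorithm.setMaximumCallsNumber(maximumEvaluationNumber)
alternativeNearestPointAlgorithm.setMaximumAbsoluteError(maximumAbsoluteError)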
We select problem #3 in the list (RP24) and create the meta-algorithm that runs the solvers on it.
i = 3
problem = benchmarkProblemList[i]
metaAlgorithm = otb.ReliabilityBenchmarkMetaAlgorithm(problem)
We try the FORM algorithm.
benchmarkFORM = metaAlgorithm.runFORM(nearestPointAlgorithm)
s1 = benchmarkFORM.summary()
print(s1)
computedProbability = 0.006209245091320793
exactProbability = 0.00286
absoluteError = 0.003349245091320793
numberOfCorrectDigits = 0.0
numberOfFunctionEvaluations = 6
numberOfDigitsPerEvaluation = 0.0
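The numberOfCorrectDigits metric in this summary appears to be a base-10 log relative error clamped at zero, and numberOfDigitsPerEvaluation divides it by the number of function calls. A minimal sketch of that computation, under this assumption, reproduces the FORM figures above:
# Sketch of the accuracy metrics (assumed definition: clamped -log10 of the relative error)
absoluteError = abs(benchmarkFORM.computedProbability - benchmarkFORM.exactProbability)
relativeError = absoluteError / benchmarkFORM.exactProbability
correctDigits = max(0.0, -np.log10(relativeError))
digitsPerEvaluation = correctDigits / benchmarkFORM.numberOfFunctionEvaluations
print(correctDigits, digitsPerEvaluation)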
Then the SORM algorithm.
benchmarkSORM = metaAlgorithm.runSORM(nearestPointAlgorithm)
s2 = benchmarkSORM.summary()
print(s2)
computedProbability = 0.006209245091320793
exactProbability = 0.00286
absoluteError = 0.003349245091320793
numberOfCorrectDigits = 0.0
numberOfFunctionEvaluations = 6
numberOfDigitsPerEvaluation = 0.0
Then the Monte Carlo algorithm.
benchmarkMC = metaAlgorithm.runMonteCarlo(
    maximumOuterSampling=1000000, coefficientOfVariation=0.1, blockSize=1,
)
s3 = benchmarkMC.summary()
print(s3)
computedProbability = 0.003011685339115775
exactProbability = 0.00286
absoluteError = 0.00015168533911577489
numberOfCorrectDigits = 1.275422426296244
numberOfFunctionEvaluations = 33204
numberOfDigitsPerEvaluation = 3.84117102245586e-05
Then the FORM importance sampling (FORM-IS) algorithm.
benchmarkFORMIS = metaAlgorithm.runFORMImportanceSampling(
    nearestPointAlgorithm,
    maximumOuterSampling=1000,
    coefficientOfVariation=0.1,
    blockSize=1,
)
s4 = benchmarkFORMIS.summary()
print(s4)
computedProbability = 0.002840570180540531
exactProbability = 0.00286
absoluteError = 1.9429819459469092e-05
numberOfCorrectDigits = 2.1678972679446287
numberOfFunctionEvaluations = 725
numberOfDigitsPerEvaluation = 0.002990203128199488
Finally, the subset sampling algorithm.
benchmarkSS = metaAlgorithm.runSubsetSampling(
    maximumOuterSampling=5000, coefficientOfVariation=0.1, blockSize=1,
)
s5 = benchmarkSS.summary()
print(s5)
computedProbability = 0.0031659999999999913
exactProbability = 0.00286
absoluteError = 0.0003059999999999912
numberOfCorrectDigits = 0.9706446066474755
numberOfFunctionEvaluations = 15000
numberOfDigitsPerEvaluation = 6.470964044316503e-05
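The five benchmark results obtained on this problem expose the same attributes, so they can be gathered in a single comparison table (a convenience sketch, not part of the otbenchmark API):
# Collect the five results obtained on this problem into one table
benchmarks = {
    "FORM": benchmarkFORM,
    "SORM": benchmarkSORM,
    "Monte Carlo": benchmarkMC,
    "FORM-IS": benchmarkFORMIS,
    "Subset": benchmarkSS,
}
comparison = pd.DataFrame(
    {
        "Estimated PF": [b.computedProbability for b in benchmarks.values()],
        "Function Calls": [b.numberOfFunctionEvaluations for b in benchmarks.values()],
    },
    index=list(benchmarks.keys()),
)
print(comparison)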
Run all algorithms on all problems and produce a single result table
For each reliability problem, we estimate the failure probability with several algorithms and gather the estimates into a single table so that they can be compared.
We create a list of problem names.
problem_names = []
for i in range(numberOfProblems):
    problem = benchmarkProblemList[i]
    name = problem.getName()
    problem_names.append(name)
metrics = [
    "Exact",
    "FORM",
    "SORM",
    "Monte Carlo",
    "FORM-IS",
    "Subset",
]
results = np.zeros((numberOfProblems, len(metrics)))
maximumOuterSampling = 10 ** 2
blockSize = 10 ** 2
coefficientOfVariation = 0.0
for i in tqdm(range(numberOfProblems)):
    problem = benchmarkProblemList[i]
    results[i][0] = problem.getProbability()
    metaAlgorithm = otb.ReliabilityBenchmarkMetaAlgorithm(problem)
    benchmarkResult = metaAlgorithm.runFORM(nearestPointAlgorithm)
    results[i][1] = benchmarkResult.computedProbability
    benchmarkResult = metaAlgorithm.runSORM(nearestPointAlgorithm)
    results[i][2] = benchmarkResult.computedProbability
    benchmarkResult = metaAlgorithm.runMonteCarlo(
        maximumOuterSampling=maximumOuterSampling,
        coefficientOfVariation=coefficientOfVariation,
        blockSize=blockSize,
    )
    results[i][3] = benchmarkResult.computedProbability
    benchmarkResult = metaAlgorithm.runFORMImportanceSampling(
        nearestPointAlgorithm,
        maximumOuterSampling=maximumOuterSampling,
        coefficientOfVariation=coefficientOfVariation,
        blockSize=blockSize,
    )
    results[i][4] = benchmarkResult.computedProbability
    benchmarkResult = metaAlgorithm.runSubsetSampling(
        maximumOuterSampling=maximumOuterSampling,
        coefficientOfVariation=coefficientOfVariation,
        blockSize=blockSize,
    )
    results[i][5] = benchmarkResult.computedProbability
df = pd.DataFrame(results, index=problem_names, columns=metrics)
# df.to_csv("reliability_benchmark_table-output.csv")
df
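The table gathers, for each problem, the exact probability and the five estimates. As a quick post-processing sketch, the absolute error of each method can be computed by subtracting the Exact column:
# Absolute error of each estimator with respect to the exact probability
estimator_columns = ["FORM", "SORM", "Monte Carlo", "FORM-IS", "Subset"]
error_df = df[estimator_columns].sub(df["Exact"], axis=0).abs()
error_df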
Run several algorithms on all problems and get detailed statistics
We run several algorithms on all the reliability benchmark problems and print detailed statistics for each problem.
def FormatRow(benchmarkResult):
    """Format a single row of the benchmark table"""
    result = [
        benchmarkResult.exactProbability,
        benchmarkResult.computedProbability,
        benchmarkResult.absoluteError,
        benchmarkResult.numberOfCorrectDigits,
        benchmarkResult.numberOfFunctionEvaluations,
        benchmarkResult.numberOfDigitsPerEvaluation,
    ]
    return result
method_names = ["Monte-Carlo", "FORM", "SORM", "FORM-IS", "SUBSET"]
maximumOuterSampling = 10 ** 2
blockSize = 10 ** 2
coefficientOfVariation = 0.0
result = dict()
for i in range(numberOfProblems):
    problem = benchmarkProblemList[i]
    name = problem_names[i]
    exact_pf_name = "%10s" % ("Exact PF " + name[0:10])
    metrics = [
        exact_pf_name,
        "Estimated PF",
        "Absolute Error",
        "Correct Digits",
        "Function Calls",
        "Digits / Evaluation",
    ]
    results = np.zeros((len(method_names), len(metrics)))
    metaAlgorithm = otb.ReliabilityBenchmarkMetaAlgorithm(problem)
    # Monte-Carlo
    benchmarkResult = metaAlgorithm.runMonteCarlo(
        maximumOuterSampling=maximumOuterSampling,
        coefficientOfVariation=coefficientOfVariation,
        blockSize=blockSize,
    )
    results[0, :] = FormatRow(benchmarkResult)
    # FORM
    benchmarkResult = metaAlgorithm.runFORM(nearestPointAlgorithm)
    results[1, :] = FormatRow(benchmarkResult)
    # SORM
    benchmarkResult = metaAlgorithm.runSORM(nearestPointAlgorithm)
    results[2, :] = FormatRow(benchmarkResult)
    # FORM-IS
    benchmarkResult = metaAlgorithm.runFORMImportanceSampling(
        nearestPointAlgorithm,
        maximumOuterSampling=maximumOuterSampling,
        coefficientOfVariation=coefficientOfVariation,
        blockSize=blockSize,
    )
    results[3, :] = FormatRow(benchmarkResult)
    # Subset
    benchmarkResult = metaAlgorithm.runSubsetSampling(
        maximumOuterSampling=maximumOuterSampling,
        coefficientOfVariation=coefficientOfVariation,
        blockSize=blockSize,
    )
    results[4, :] = FormatRow(benchmarkResult)
    # Gather statistics and print them
    df = pd.DataFrame(results, index=method_names, columns=metrics)
    # Format the columns for readability
    s = df.style.format(
        {
            exact_pf_name: lambda x: "{:.3e}".format(x),
            "Estimated PF": lambda x: "{:.3e}".format(x),
            "Absolute Error": lambda x: "{:.3e}".format(x),
            "Correct Digits": lambda x: "{:.1f}".format(x),
            "Function Calls": lambda x: "{:d}".format(int(x)),
            "Digits / Evaluation": lambda x: "{:.1f}".format(x),
        }
    )
    result[name] = s
Display the detailed statistics for two of the problems.
result["RP33"]
result["RP35"]
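Each entry of result is a pandas Styler object, so a table can also be exported, for instance to HTML for a report. This is an optional sketch; Styler.to_html requires a reasonably recent pandas version.
# with open("reliability_benchmark_RP33-output.html", "w") as f:
#     f.write(result["RP33"].to_html())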
Total running time of the script: (0 minutes 11.011 seconds)