ࡱ> 1   _ausqom@W bjbjצצ&~9  nVQVQVQXR J\nj uIdfff}Ii i i i i i i $Ln Rp i -.i ffEj @[1[1[1/ xffi [1i [1[1T BX= fu amqVQN5 i TXPj Hj 9 tq )tq = nn tq = ,Z'@[1g4fi i nndҳ f%nnҳNASA Software Safety Guidebook   Forward This document is a product of the NASA Software Program, an Agencywide program to promote the continual improvement of software engineering within NASA. The goals and strategies for this program are documented in the NASA Software Strategic Plan, July 13, 1995. Additional information is available from the Software IV&V Facility on the world-wide-web site http://www.ivv.nasa.gov Contents  TOC \o "1-5" \h \z  HYPERLINK \l "_Toc510937329" 1. INTRODUCTION  PAGEREF _Toc510937329 \h 12  HYPERLINK \l "_Toc510937330" 1.1 Scope  PAGEREF _Toc510937330 \h 12  HYPERLINK \l "_Toc510937331" 1.2 Purpose  PAGEREF _Toc510937331 \h 13  HYPERLINK \l "_Toc510937332" 1.3 Acknowledgments  PAGEREF _Toc510937332 \h 14  HYPERLINK \l "_Toc510937333" 1.4 Associated Documents  PAGEREF _Toc510937333 \h 14  HYPERLINK \l "_Toc510937334" 1.5 Roadmap of this Guidebook  PAGEREF _Toc510937334 \h 14  HYPERLINK \l "_Toc510937335" 2. SOFTWARE SAFETY IN A SYSTEM SAFETY CONTEXT  PAGEREF _Toc510937335 \h 17  HYPERLINK \l "_Toc510937336" 2.1 What is a Hazard?  PAGEREF _Toc510937336 \h 17  HYPERLINK \l "_Toc510937337" 2.2 What Makes Software Hazardous?  PAGEREF _Toc510937337 \h 18  HYPERLINK \l "_Toc510937338" 2.2.1 What is Safety Critical Software?  PAGEREF _Toc510937338 \h 19  HYPERLINK \l "_Toc510937339" 2.2.2 How Does Software Control Hazards?  PAGEREF _Toc510937339 \h 19  HYPERLINK \l "_Toc510937340" 2.2.3 What About Hardware Controls?  PAGEREF _Toc510937340 \h 19  HYPERLINK \l "_Toc510937341" 2.2.4 Caveats with Software Controls  PAGEREF _Toc510937341 \h 20  HYPERLINK \l "_Toc510937342" 2.2.5 What is Fault Tolerance?  
PAGEREF _Toc510937342 \h 21  HYPERLINK \l "_Toc510937343" 2.3 The System Safety Program  PAGEREF _Toc510937343 \h 21  HYPERLINK \l "_Toc510937344" 2.3.1 Safety Requirements Determination  PAGEREF _Toc510937344 \h 22  HYPERLINK \l "_Toc510937345" 2.4 Preliminary Hazard Analysis (PHA)  PAGEREF _Toc510937345 \h 23  HYPERLINK \l "_Toc510937346" 2.4.1 PHA Approach  PAGEREF _Toc510937346 \h 24  HYPERLINK \l "_Toc510937347" 2.4.1.1 Identifying Hazards  PAGEREF _Toc510937347 \h 25  HYPERLINK \l "_Toc510937348" 2.4.1.2 Risk Levels  PAGEREF _Toc510937348 \h 26  HYPERLINK \l "_Toc510937349" 2.4.1.3 NASA Policy for Hazard Elimination/Control  PAGEREF _Toc510937349 \h 28  HYPERLINK \l "_Toc510937350" 2.4.2 Preliminary Hazard Analysis Process  PAGEREF _Toc510937350 \h 28  HYPERLINK \l "_Toc510937351" 2.4.3 Tools and Methods for PHA  PAGEREF _Toc510937351 \h 30  HYPERLINK \l "_Toc510937352" 2.4.4 PHA is a Living Document  PAGEREF _Toc510937352 \h 32  HYPERLINK \l "_Toc510937353" 2.5 Software Subsystem Hazard Analysis  PAGEREF _Toc510937353 \h 32  HYPERLINK \l "_Toc510937354" 3. 
SOFTWARE SAFETY PLANNING  PAGEREF _Toc510937354 \h 33  HYPERLINK \l "_Toc510937355" 3.1 Software Development Life-cycle Approach  PAGEREF _Toc510937355 \h 34  HYPERLINK \l "_Toc510937356" 3.2 Scope of Software Subsystem Safety Effort  PAGEREF _Toc510937356 \h 36  HYPERLINK \l "_Toc510937357" 3.2.1 Identify Safety Critical Software  PAGEREF _Toc510937357 \h 37  HYPERLINK \l "_Toc510937358" 3.2.2 Categorize Safety Critical Software Subsystems  PAGEREF _Toc510937358 \h 38  HYPERLINK \l "_Toc510937359" 3.2.2.1 Software Control Categories  PAGEREF _Toc510937359 \h 39  HYPERLINK \l "_Toc510937360" 3.2.2.2 Software Hazard Criticality Matrix  PAGEREF _Toc510937360 \h 40  HYPERLINK \l "_Toc510937361" 3.2.3.1 Determine Extent of Effort  PAGEREF _Toc510937361 \h 42  HYPERLINK \l "_Toc510937362" 3.2.3.2 Oversight Required  PAGEREF _Toc510937362 \h 43  HYPERLINK \l "_Toc510937363" 3.2.3.3 Tailoring the Effort  PAGEREF _Toc510937363 \h 44  HYPERLINK \l "_Toc510937364" 3.2.3.3.1 Full Software Safety Effort  PAGEREF _Toc510937364 \h 45  HYPERLINK \l "_Toc510937365" 3.2.3.3.2 Moderate Software Safety Effort  PAGEREF _Toc510937365 \h 45  HYPERLINK \l "_Toc510937366" 3.2.3.3.3 Minimum Software Safety Effort  PAGEREF _Toc510937366 \h 46  HYPERLINK \l "_Toc510937367" 3.2.3.3.4 Match the Safety Activities to Meet the Development Effort  PAGEREF _Toc510937367 \h 46  HYPERLINK \l "_Toc510937368" 3.3 Incorporating Software Safety into Software Development  PAGEREF _Toc510937368 \h 47  HYPERLINK \l "_Toc510937369" 4. 
SAFETY CRITICAL SOFTWARE DEVELOPMENT  PAGEREF _Toc510937369 \h 55  HYPERLINK \l "_Toc510937370" 4.1 Software Concept and Initiation Phase  PAGEREF _Toc510937370 \h 55  HYPERLINK \l "_Toc510937371" 4.2 Software Requirements Phase  PAGEREF _Toc510937371 \h 56  HYPERLINK \l "_Toc510937372" 4.2.1 Development of Software Safety Requirements  PAGEREF _Toc510937372 \h 57  HYPERLINK \l "_Toc510937373" 4.2.1.1 Safety Requirements Flow-down  PAGEREF _Toc510937373 \h 57  HYPERLINK \l "_Toc510937374" 4.2.2 Generic Software Safety Requirements  PAGEREF _Toc510937374 \h 57  HYPERLINK \l "_Toc510937375" 4.2.2.1 Fault and Failure Tolerance/Independence  PAGEREF _Toc510937375 \h 58  HYPERLINK \l "_Toc510937376" 4.2.2.2 Hazardous Commands  PAGEREF _Toc510937376 \h 60  HYPERLINK \l "_Toc510937377" 4.2.2.3 Timing, Sizing and Throughput Considerations  PAGEREF _Toc510937377 \h 61  HYPERLINK \l "_Toc510937378" 4.2.3 Formal Methods - Specification Development  PAGEREF _Toc510937378 \h 63  HYPERLINK \l "_Toc510937379" 4.2.3.1 Why Is Formal Methods Necessary?  PAGEREF _Toc510937379 \h 64  HYPERLINK \l "_Toc510937380" 4.2.3.2 What Is Formal Methods?  
PAGEREF _Toc510937380 \h 65  HYPERLINK \l "_Toc510937381" 4.2.4 Model Checking  PAGEREF _Toc510937381 \h 66  HYPERLINK \l "_Toc510937382" 4.2.4.1 How Model Checking Works  PAGEREF _Toc510937382 \h 66  HYPERLINK \l "_Toc510937383" 4.2.4.2 Tools  PAGEREF _Toc510937383 \h 67  HYPERLINK \l "_Toc510937384" 4.2.4.3 Challenges  PAGEREF _Toc510937384 \h 68  HYPERLINK \l "_Toc510937385" 4.2.5 Formal Inspections of Specifications  PAGEREF _Toc510937385 \h 68  HYPERLINK \l "_Toc510937386" 4.2.6 Test Planning  PAGEREF _Toc510937386 \h 69  HYPERLINK \l "_Toc510937387" 4.3 Architectural Design Phase  PAGEREF _Toc510937387 \h 70  HYPERLINK \l "_Toc510937388" 4.3.1 Safety Objectives of Architectural Design  PAGEREF _Toc510937388 \h 70  HYPERLINK \l "_Toc510937389" 4.3.1.1 Fault Containment Regions  PAGEREF _Toc510937389 \h 71  HYPERLINK \l "_Toc510937390" 4.3.1.2 N-Version Programming  PAGEREF _Toc510937390 \h 72  HYPERLINK \l "_Toc510937391" 4.3.1.3 Redundant Architecture  PAGEREF _Toc510937391 \h 73  HYPERLINK \l "_Toc510937392" 4.3.2 Structured Design Techniques  PAGEREF _Toc510937392 \h 73  HYPERLINK \l "_Toc510937393" 4.3.2.1 Object Oriented Analysis and Design  PAGEREF _Toc510937393 \h 75  HYPERLINK \l "_Toc510937394" 4.3.2.2 Unified Modeling Language (UML)  PAGEREF _Toc510937394 \h 77  HYPERLINK \l "_Toc510937395" 4.3.3 Selection of COTS and Reuse  PAGEREF _Toc510937395 \h 78  HYPERLINK \l "_Toc510937396" 4.3.4 Selection of development tools and operating systems  PAGEREF _Toc510937396 \h 78  HYPERLINK \l "_Toc510937397" 4.3.5 Coding Standards  PAGEREF _Toc510937397 \h 78  HYPERLINK \l "_Toc510937398" 4.3.6 Test Plan Update  PAGEREF _Toc510937398 \h 79  HYPERLINK \l "_Toc510937399" 4.4 Detailed Design Phase  PAGEREF _Toc510937399 \h 79  HYPERLINK \l "_Toc510937400" 4.5 Software Implementation  PAGEREF _Toc510937400 \h 81  HYPERLINK \l "_Toc510937401" 4.5.1 Coding Checklists  PAGEREF _Toc510937401 \h 81  HYPERLINK \l "_Toc510937402" 4.5.2 Defensive Programming  PAGEREF 
_Toc510937402 \h 82  HYPERLINK \l "_Toc510937403" 4.5.3 Refactoring  PAGEREF _Toc510937403 \h 82  HYPERLINK \l "_Toc510937404" 4.5.4 Unit Level Testing  PAGEREF _Toc510937404 \h 83  HYPERLINK \l "_Toc510937405" 4.6 Software Integration and Test  PAGEREF _Toc510937405 \h 84  HYPERLINK \l "_Toc510937406" 4.6.1 Testing Techniques  PAGEREF _Toc510937406 \h 86  HYPERLINK \l "_Toc510937407" 4.6.2 Test Setups and Documentation  PAGEREF _Toc510937407 \h 91  HYPERLINK \l "_Toc510937408" 4.6.3 Integration Testing  PAGEREF _Toc510937408 \h 92  HYPERLINK \l "_Toc510937409" 4.6.4 Object Oriented Testing  PAGEREF _Toc510937409 \h 92  HYPERLINK \l "_Toc510937410" 4.6.5 System Testing  PAGEREF _Toc510937410 \h 93  HYPERLINK \l "_Toc510937411" 4.6.6 Regression Testing  PAGEREF _Toc510937411 \h 94  HYPERLINK \l "_Toc510937412" 4.6.7 Software Safety Testing  PAGEREF _Toc510937412 \h 95  HYPERLINK \l "_Toc510937413" 4.6.8 Test Witnessing  PAGEREF _Toc510937413 \h 96  HYPERLINK \l "_Toc510937414" 4.7 Software Acceptance and Delivery Phase  PAGEREF _Toc510937414 \h 97  HYPERLINK \l "_Toc510937415" 4.8 Software Operations & Maintenance  PAGEREF _Toc510937415 \h 97  HYPERLINK \l "_Toc510937416" 5. 
SOFTWARE SAFETY ANALYSIS  PAGEREF _Toc510937416 \h 99  HYPERLINK \l "_Toc510937417" 5.1 Software Safety Requirements Analysis  PAGEREF _Toc510937417 \h 100  HYPERLINK \l "_Toc510937418" 5.1.1 Software Safety Requirements Flow-down Analysis  PAGEREF _Toc510937418 \h 100  HYPERLINK \l "_Toc510937419" 5.1.1.1 Checklists and cross references  PAGEREF _Toc510937419 \h 101  HYPERLINK \l "_Toc510937420" 5.1.2 Requirements Criticality Analysis  PAGEREF _Toc510937420 \h 101  HYPERLINK \l "_Toc510937421" 5.1.2.1 Critical Software Characteristics  PAGEREF _Toc510937421 \h 103  HYPERLINK \l "_Toc510937422" 5.1.3 Specification Analysis  PAGEREF _Toc510937422 \h 105  HYPERLINK \l "_Toc510937423" 5.1.3.1 Control-flow analysis  PAGEREF _Toc510937423 \h 106  HYPERLINK \l "_Toc510937424" 5.1.3.2 Information-flow analysis  PAGEREF _Toc510937424 \h 106  HYPERLINK \l "_Toc510937425" 5.1.3.3 Functional simulation models  PAGEREF _Toc510937425 \h 106  HYPERLINK \l "_Toc510937426" 5.1.4 Formal Inspections  PAGEREF _Toc510937426 \h 107  HYPERLINK \l "_Toc510937427" 5.1.5 Timing, Throughput And Sizing Analysis  PAGEREF _Toc510937427 \h 107  HYPERLINK \l "_Toc510937428" 5.1.6 Software Fault Tree Analysis  PAGEREF _Toc510937428 \h 109  HYPERLINK \l "_Toc510937429" 5.1.7 Conclusion  PAGEREF _Toc510937429 \h 109  HYPERLINK \l "_Toc510937430" 5.2 Architectural Design Analysis  PAGEREF _Toc510937430 \h 110  HYPERLINK \l "_Toc510937431" 5.2.1 Update Criticality Analysis  PAGEREF _Toc510937431 \h 110  HYPERLINK \l "_Toc510937432" 5.2.2 Conduct Hazard Risk Assessment  PAGEREF _Toc510937432 \h 111  HYPERLINK \l "_Toc510937433" 5.2.3 Analyze Architectural Design  PAGEREF _Toc510937433 \h 111  HYPERLINK \l "_Toc510937434" 5.2.3.1 Design Reviews  PAGEREF _Toc510937434 \h 112  HYPERLINK \l "_Toc510937435" 5.2.3.2 Prototype/Animation/Simulation  PAGEREF _Toc510937435 \h 112  HYPERLINK \l "_Toc510937436" 5.2.4 Interface Analysis  PAGEREF _Toc510937436 \h 113  HYPERLINK \l "_Toc510937437" 5.2.4.1 
Interdependence Analysis  PAGEREF _Toc510937437 \h 113  HYPERLINK \l "_Toc510937438" 5.2.4.2 Independence Analysis  PAGEREF _Toc510937438 \h 113  HYPERLINK \l "_Toc510937439" 5.2.5 Update Timing, Throughput, and Sizing Analysis  PAGEREF _Toc510937439 \h 113  HYPERLINK \l "_Toc510937440" 5.2.6 Update Software Fault Tree Analysis  PAGEREF _Toc510937440 \h 113  HYPERLINK \l "_Toc510937441" 5.2.7 Formal Inspections of Architectural Design Products  PAGEREF _Toc510937441 \h 114  HYPERLINK \l "_Toc510937442" 5.2.8 Formal Methods and Model Checking  PAGEREF _Toc510937442 \h 114  HYPERLINK \l "_Toc510937443" 5.3 Detailed Design Analysis  PAGEREF _Toc510937443 \h 114  HYPERLINK \l "_Toc510937444" 5.3.1 Design Logic Analysis (DLA)  PAGEREF _Toc510937444 \h 115  HYPERLINK \l "_Toc510937445" 5.3.2 Design Data Analysis  PAGEREF _Toc510937445 \h 115  HYPERLINK \l "_Toc510937446" 5.3.3 Design Interface Analysis  PAGEREF _Toc510937446 \h 116  HYPERLINK \l "_Toc510937447" 5.3.4 Design Constraint Analysis  PAGEREF _Toc510937447 \h 117  HYPERLINK \l "_Toc510937448" 5.3.5 Design Functional Analysis  PAGEREF _Toc510937448 \h 117  HYPERLINK \l "_Toc510937449" 5.3.6 Software Element Analysis  PAGEREF _Toc510937449 \h 118  HYPERLINK \l "_Toc510937450" 5.3.7 Rate Monotonic Analysis  PAGEREF _Toc510937450 \h 118  HYPERLINK \l "_Toc510937451" 5.3.8 Dynamic Flowgraph Analysis  PAGEREF _Toc510937451 \h 118  HYPERLINK \l "_Toc510937452" 5.3.9 Markov Modeling  PAGEREF _Toc510937452 \h 119  HYPERLINK \l "_Toc510937453" 5.3.10 Measurement of Complexity  PAGEREF _Toc510937453 \h 119  HYPERLINK \l "_Toc510937454" 5.3.10.1 Function Points  PAGEREF _Toc510937454 \h 120  HYPERLINK \l "_Toc510937455" 5.3.10.2 Function Point extensions  PAGEREF _Toc510937455 \h 121  HYPERLINK \l "_Toc510937456" 5.3.11 Selection of Programming Languages  PAGEREF _Toc510937456 \h 122  HYPERLINK \l "_Toc510937457" 5.3.12 Formal Methods and Model Checking  PAGEREF _Toc510937457 \h 123  HYPERLINK \l "_Toc510937458" 5.3.13 
Requirements State Machines  PAGEREF _Toc510937458 \h 123  HYPERLINK \l "_Toc510937459" 5.3.14 Formal Inspections of Detailed Design Products  PAGEREF _Toc510937459 \h 123  HYPERLINK \l "_Toc510937460" 5.3.15 Software Failure Modes and Effects Analysis  PAGEREF _Toc510937460 \h 123  HYPERLINK \l "_Toc510937461" 5.3.16 Updates to Previous Analyses  PAGEREF _Toc510937461 \h 124  HYPERLINK \l "_Toc510937462" 5.4 Code Analysis  PAGEREF _Toc510937462 \h 124  HYPERLINK \l "_Toc510937463" 5.4.1 Code Logic Analysis  PAGEREF _Toc510937463 \h 125  HYPERLINK \l "_Toc510937464" 5.4.2 Code Data Analysis  PAGEREF _Toc510937464 \h 126  HYPERLINK \l "_Toc510937465" 5.4.3 Code Interface Analysis  PAGEREF _Toc510937465 \h 126  HYPERLINK \l "_Toc510937466" 5.4.4 Update Measurement of Complexity  PAGEREF _Toc510937466 \h 126  HYPERLINK \l "_Toc510937467" 5.4.5 Update Design Constraint Analysis  PAGEREF _Toc510937467 \h 126  HYPERLINK \l "_Toc510937468" 5.4.6 Formal Code Inspections, Checklists, and Coding Standards  PAGEREF _Toc510937468 \h 127  HYPERLINK \l "_Toc510937469" 5.4.7 Applying Formal Methods to Code  PAGEREF _Toc510937469 \h 127  HYPERLINK \l "_Toc510937470" 5.4.8 Unused Code Analysis  PAGEREF _Toc510937470 \h 128  HYPERLINK \l "_Toc510937471" 5.4.9 Interrupt Analysis  PAGEREF _Toc510937471 \h 128  HYPERLINK \l "_Toc510937472" 5.4.10 Final Timing, Throughput, and Sizing Analysis  PAGEREF _Toc510937472 \h 129  HYPERLINK \l "_Toc510937473" 5.4.11 Program Slicing  PAGEREF _Toc510937473 \h 129  HYPERLINK \l "_Toc510937474" 5.4.12 Update Software Failure Modes and Effects Analysis  PAGEREF _Toc510937474 \h 129  HYPERLINK \l "_Toc510937475" 5.5 Test Analysis  PAGEREF _Toc510937475 \h 130  HYPERLINK \l "_Toc510937476" 5.5.1 Test Coverage  PAGEREF _Toc510937476 \h 130  HYPERLINK \l "_Toc510937477" 5.5.2 Formal Inspections of Test Plan and Procedures  PAGEREF _Toc510937477 \h 130  HYPERLINK \l "_Toc510937478" 5.5.3 Reliability Modeling  PAGEREF _Toc510937478 \h 131  HYPERLINK \l 
"_Toc510937479" 5.5.3.1 Criteria for Selecting a Reliability Model  PAGEREF _Toc510937479 \h 131  HYPERLINK \l "_Toc510937480" 5.5.3.2 Issues and Concerns  PAGEREF _Toc510937480 \h 132  HYPERLINK \l "_Toc510937481" 5.5.3.3 Tools  PAGEREF _Toc510937481 \h 132  HYPERLINK \l "_Toc510937482" 5.5.3.4 Dissenting Views  PAGEREF _Toc510937482 \h 133  HYPERLINK \l "_Toc510937483" 5.5.3.5 Resources  PAGEREF _Toc510937483 \h 133  HYPERLINK \l "_Toc510937484" 5.5.4 Checklists of Tests  PAGEREF _Toc510937484 \h 134  HYPERLINK \l "_Toc510937485" 5.5.5 Test Results Analysis  PAGEREF _Toc510937485 \h 134  HYPERLINK \l "_Toc510937486" 5.5.6 Independent Verification and Validation  PAGEREF _Toc510937486 \h 134  HYPERLINK \l "_Toc510937487" 5.5.7 Resources  PAGEREF _Toc510937487 \h 135  HYPERLINK \l "_Toc510937488" 5.6 Operations & Maintenance  PAGEREF _Toc510937488 \h 135  HYPERLINK \l "_Toc510937489" 6. SOFTWARE DEVELOPMENT ISSUES  PAGEREF _Toc510937489 \h 136  HYPERLINK \l "_Toc510937490" 6.1 Safe Subsets of Languages  PAGEREF _Toc510937490 \h 137  HYPERLINK \l "_Toc510937491" 6.2 Insecurities Common to All Languages  PAGEREF _Toc510937491 \h 138  HYPERLINK \l "_Toc510937492" 6.3 Method of Assessment  PAGEREF _Toc510937492 \h 139  HYPERLINK \l "_Toc510937493" 6.4 Languages  PAGEREF _Toc510937493 \h 139  HYPERLINK \l "_Toc510937494" 6.4.1 Ada83 and Ada95 Languages  PAGEREF _Toc510937494 \h 140  HYPERLINK \l "_Toc510937495" 6.4.2 Assembly Languages  PAGEREF _Toc510937495 \h 143  HYPERLINK \l "_Toc510937496" 6.4.3 C Language  PAGEREF _Toc510937496 \h 144  HYPERLINK \l "_Toc510937497" 6.4.4 C++ Language  PAGEREF _Toc510937497 \h 148  HYPERLINK \l "_Toc510937498" 6.4.5 C# Language  PAGEREF _Toc510937498 \h 151  HYPERLINK \l "_Toc510937499" 6.4.6 Forth Language  PAGEREF _Toc510937499 \h 153  HYPERLINK \l "_Toc510937500" 6.4.7 FORTRAN Language  PAGEREF _Toc510937500 \h 154  HYPERLINK \l "_Toc510937501" 6.4.8 Java Language  PAGEREF _Toc510937501 \h 155  HYPERLINK \l "_Toc510937502" 6.4.6 
LabVIEW  PAGEREF _Toc510937502 \h 157  HYPERLINK \l "_Toc510937503" 6.4.7 Pascal Language  PAGEREF _Toc510937503 \h 158  HYPERLINK \l "_Toc510937504" 6.4.8 Visual Basic  PAGEREF _Toc510937504 \h 159  HYPERLINK \l "_Toc510937505" 6.5 Miscellaneous Problems Present in Most Languages  PAGEREF _Toc510937505 \h 159  HYPERLINK \l "_Toc510937506" 6.6 Programming Languages: Conclusions  PAGEREF _Toc510937506 \h 161  HYPERLINK \l "_Toc510937507" 6.7 Compilers, Editors, Debuggers, IDEs and other Tools  PAGEREF _Toc510937507 \h 162  HYPERLINK \l "_Toc510937508" 6.8 CASE tools and Automatic Code Generation  PAGEREF _Toc510937508 \h 164  HYPERLINK \l "_Toc510937509" 6.8.1 Computer-Aided Software Engineering (CASE)  PAGEREF _Toc510937509 \h 164  HYPERLINK \l "_Toc510937510" 6.8.2 Automatic Code Generation  PAGEREF _Toc510937510 \h 166  HYPERLINK \l "_Toc510937511" 6.8.2.1 Visual Languages  PAGEREF _Toc510937511 \h 166  HYPERLINK \l "_Toc510937512" 6.8.2.2 Visual Programming Environments  PAGEREF _Toc510937512 \h 167  HYPERLINK \l "_Toc510937513" 6.8.2.3 Code Generation from Design Models  PAGEREF _Toc510937513 \h 167  HYPERLINK \l "_Toc510937514" 6.9 Software Configuration Management  PAGEREF _Toc510937514 \h 169  HYPERLINK \l "_Toc510937515" 6.9.1 Change control  PAGEREF _Toc510937515 \h 170  HYPERLINK \l "_Toc510937516" 6.9.2 Versioning  PAGEREF _Toc510937516 \h 170  HYPERLINK \l "_Toc510937517" 6.9.3 Status Accounting  PAGEREF _Toc510937517 \h 171  HYPERLINK \l "_Toc510937518" 6.9.4 Defect Tracking  PAGEREF _Toc510937518 \h 172  HYPERLINK \l "_Toc510937519" 6.9.5 Metrics from your SCM system  PAGEREF _Toc510937519 \h 172  HYPERLINK \l "_Toc510937520" 6.9.6 What to include in your SCM system  PAGEREF _Toc510937520 \h 173  HYPERLINK \l "_Toc510937521" 6.10 Operating Systems  PAGEREF _Toc510937521 \h 174  HYPERLINK \l "_Toc510937522" 6.10.1 Types of operating systems  PAGEREF _Toc510937522 \h 174  HYPERLINK \l "_Toc510937523" 6.10.2 Do I really need a real-time operating system 
(RTOS)?  PAGEREF _Toc510937523 \h 174  HYPERLINK \l "_Toc510937524" 6.10.3 What to look for in an RTOS  PAGEREF _Toc510937524 \h 175  HYPERLINK \l "_Toc510937525" 6.10.4 Commonly used Operating Systems  PAGEREF _Toc510937525 \h 177  HYPERLINK \l "_Toc510937526" 6.11 Distributed Computing  PAGEREF _Toc510937526 \h 178  HYPERLINK \l "_Toc510937527" 6.12 Programmable Logic Devices  PAGEREF _Toc510937527 \h 181  HYPERLINK \l "_Toc510937528" 6.12.1 Types of Programmable Logic Devices  PAGEREF _Toc510937528 \h 182  HYPERLINK \l "_Toc510937529" 6.12.2 Program Once Devices  PAGEREF _Toc510937529 \h 182  HYPERLINK \l "_Toc510937530" 6.12.3 Reprogram in the Field Devices  PAGEREF _Toc510937530 \h 183  HYPERLINK \l "_Toc510937531" 6.12.4 Configurable Computing  PAGEREF _Toc510937531 \h 183  HYPERLINK \l "_Toc510937532" 6.12.5 Safety and Programmable Logic Devices  PAGEREF _Toc510937532 \h 184  HYPERLINK \l "_Toc510937533" 6.13 Embedded Web Technology  PAGEREF _Toc510937533 \h 186  HYPERLINK \l "_Toc510937534" 6.13.1 Embedded Web Servers  PAGEREF _Toc510937534 \h 186  HYPERLINK \l "_Toc510937535" 6.13.2 Testing Techniques  PAGEREF _Toc510937535 \h 187  HYPERLINK \l "_Toc510937536" 6.14 AI and Autonomous Systems  PAGEREF _Toc510937536 \h 188  HYPERLINK \l "_Toc510937537" 6.14.1 Examples of Intelligent Autonomous Systems (IAS)  PAGEREF _Toc510937537 \h 189  HYPERLINK \l "_Toc510937538" 6.14.2 Problems and Concerns  PAGEREF _Toc510937538 \h 190  HYPERLINK \l "_Toc510937539" 6.14.3 Case Study Remote Agent on Deep Space 1  PAGEREF _Toc510937539 \h 191  HYPERLINK \l "_Toc510937540" 6.14.3.1 Remote Agent Description  PAGEREF _Toc510937540 \h 192  HYPERLINK \l "_Toc510937541" 6.14.3.2 Testing and Verification of Remote Agent  PAGEREF _Toc510937541 \h 192  HYPERLINK \l "_Toc510937542" 6.14.3.3 In-flight Validation: How well did it work?  
PAGEREF _Toc510937542 \h 194  HYPERLINK \l "_Toc510937543" 6.15 Good Programming Practices for Safety  PAGEREF _Toc510937543 \h 195  HYPERLINK \l "_Toc510937544" 6.16 Wrapping it all up  PAGEREF _Toc510937544 \h 200  HYPERLINK \l "_Toc510937545" 7. SOFTWARE ACQUISITION  PAGEREF _Toc510937545 \h 201  HYPERLINK \l "_Toc510937546" 7.1 Off-the-Shelf Software  PAGEREF _Toc510937546 \h 202  HYPERLINK \l "_Toc510937547" 7.1.1 Purchasing or Reusing OTS Software: Recommendations  PAGEREF _Toc510937547 \h 204  HYPERLINK \l "_Toc510937548" 7.1.2 Integrating OTS Software into your System  PAGEREF _Toc510937548 \h 208  HYPERLINK \l "_Toc510937549" 7.1.2.1 Sticky stuff: Glueware and Wrapper Functions  PAGEREF _Toc510937549 \h 208  HYPERLINK \l "_Toc510937550" 7.1.2.2 Redundant Architecture  PAGEREF _Toc510937550 \h 209  HYPERLINK \l "_Toc510937551" 7.1.2.3 Adding or Adapting Functionality  PAGEREF _Toc510937551 \h 209  HYPERLINK \l "_Toc510937552" 7.1.2.4 Dealing with Extra Functionality  PAGEREF _Toc510937552 \h 210  HYPERLINK \l "_Toc510937553" 7.1.3 Special Problems with Reused Software  PAGEREF _Toc510937553 \h 211  HYPERLINK \l "_Toc510937554" 7.1.4 Who Tests the OTS? (Us vs. 
Them)  PAGEREF _Toc510937554 \h 211  HYPERLINK \l "_Toc510937555" 7.1.3.1 Recommended Analyses and Tests  PAGEREF _Toc510937555 \h 213  HYPERLINK \l "_Toc510937556" 7.2 Contractor-developed Software  PAGEREF _Toc510937556 \h 214  HYPERLINK \l "_Toc510937557" 7.2.1 Contract Inclusions  PAGEREF _Toc510937557 \h 214  HYPERLINK \l "_Toc510937558" 7.1.2.1 Safety Process  PAGEREF _Toc510937558 \h 215  HYPERLINK \l "_Toc510937559" 7.1.2.2 Analysis and Test  PAGEREF _Toc510937559 \h 215  HYPERLINK \l "_Toc510937560" 7.1.2.3 Software Assurance and Development Process  PAGEREF _Toc510937560 \h 215  HYPERLINK \l "_Toc510937561" 7.1.2.3 Contractor Surveillance  PAGEREF _Toc510937561 \h 216  HYPERLINK \l "_Toc510937562" 7.1.2.4 Software Deliverables  PAGEREF _Toc510937562 \h 216  HYPERLINK \l "_Toc510937563" 7.1.2.5 Independent Verification and Validation (IV&V)  PAGEREF _Toc510937563 \h 217  HYPERLINK \l "_Toc510937564" 7.1.2.6 Software Change Process  PAGEREF _Toc510937564 \h 217  HYPERLINK \l "_Toc510937565" 7.1.2.7 Requirements Specification  PAGEREF _Toc510937565 \h 217  HYPERLINK \l "_Toc510937566" 7.2.2 Monitoring Contractor Processes  PAGEREF _Toc510937566 \h 217  HYPERLINK \l "_Toc510937567" 7.2.3 Recommended Software Testing  PAGEREF _Toc510937567 \h 218  HYPERLINK \l "_Toc510937568" 8. 
REFERENCES  PAGEREF _Toc510937568 \h 219  HYPERLINK \l "_Toc510937569" APPENDIX A  PAGEREF _Toc510937569 \h 229  HYPERLINK \l "_Toc510937570" Glossary of Terms  PAGEREF _Toc510937570 \h 229  HYPERLINK \l "_Toc510937571" APPENDIX B Software Fault Tree Analysis (SFTA)  PAGEREF _Toc510937571 \h 249  HYPERLINK \l "_Toc510937572" B.1 Software Fault Tree Analysis Description  PAGEREF _Toc510937572 \h 249  HYPERLINK \l "_Toc510937573" B.2 Goal of Software Fault Tree Analysis  PAGEREF _Toc510937573 \h 249  HYPERLINK \l "_Toc510937574" B.3 Use of Software Fault Tree Analysis  PAGEREF _Toc510937574 \h 251  HYPERLINK \l "_Toc510937575" B.4 Benefits Of Software Fault Tree Analysis  PAGEREF _Toc510937575 \h 253  HYPERLINK \l "_Toc510937576" APPENDIX C Software Failure Modes and Effects Analysis  PAGEREF _Toc510937576 \h 257  HYPERLINK \l "_Toc510937577" C.1 Terminology  PAGEREF _Toc510937577 \h 257  HYPERLINK \l "_Toc510937578" C.2 Why do an SFMEA?  PAGEREF _Toc510937578 \h 258  HYPERLINK \l "_Toc510937579" C.3 Issues with SFMEA  PAGEREF _Toc510937579 \h 258  HYPERLINK \l "_Toc510937580" C.4 The SFMEA Process  PAGEREF _Toc510937580 \h 260  HYPERLINK \l "_Toc510937581" C.4.1 Identify Project/system Components  PAGEREF _Toc510937581 \h 260  HYPERLINK \l "_Toc510937582" C.4.2 Ground Rules  PAGEREF _Toc510937582 \h 261  HYPERLINK \l "_Toc510937583" C.4.3 Identify Failures  PAGEREF _Toc510937583 \h 263  HYPERLINK \l "_Toc510937584" C.4.3.1 Examination of Normal Operations as Part of the System  PAGEREF _Toc510937584 \h 264  HYPERLINK \l "_Toc510937585" C.4.3.2 Identify Possible Areas for Faults  PAGEREF _Toc510937585 \h 264  HYPERLINK \l "_Toc510937586" C.4.3.3 Possible Failure Modes  PAGEREF _Toc510937586 \h 265  HYPERLINK \l "_Toc510937587" C.4.3.4 Start at the Bottom  PAGEREF _Toc510937587 \h 265  HYPERLINK \l "_Toc510937588" C.4.4 Identify Consequences of each Failure  PAGEREF _Toc510937588 \h 266  HYPERLINK \l "_Toc510937589" C.4.5 Detection and Compensation  PAGEREF 
_Toc510937589 \h 267  HYPERLINK \l "_Toc510937590" C.4.6 Design Changes  PAGEREF _Toc510937590 \h 267  HYPERLINK \l "_Toc510937591" C.4.7 Impacts of Corrective Changes  PAGEREF _Toc510937591 \h 268  HYPERLINK \l "_Toc510937592" C.4.8 Example forms  PAGEREF _Toc510937592 \h 269  HYPERLINK \l "_Toc510937593" APPENDIX D Requirements State Machines  PAGEREF _Toc510937593 \h 270  HYPERLINK \l "_Toc510937594" D.1 Characteristics of State Machines  PAGEREF _Toc510937594 \h 270  HYPERLINK \l "_Toc510937595" D.2 Properties of Safe State Machines  PAGEREF _Toc510937595 \h 270  HYPERLINK \l "_Toc510937596" D.3 Input/Output Variables  PAGEREF _Toc510937596 \h 270  HYPERLINK \l "_Toc510937597" D.4 State Attributes  PAGEREF _Toc510937597 \h 270  HYPERLINK \l "_Toc510937598" D.5 Trigger Predicates  PAGEREF _Toc510937598 \h 270  HYPERLINK \l "_Toc510937599" D.6 Output Predicates  PAGEREF _Toc510937599 \h 270  HYPERLINK \l "_Toc510937600" D.7 Degraded Mode Operation  PAGEREF _Toc510937600 \h 270  HYPERLINK \l "_Toc510937601" D.8 Feedback Loop Analysis  PAGEREF _Toc510937601 \h 270  HYPERLINK \l "_Toc510937602" D.9 Transition Characteristics  PAGEREF _Toc510937602 \h 270  HYPERLINK \l "_Toc510937603" D.10 Conclusions  PAGEREF _Toc510937603 \h 270  HYPERLINK \l "_Toc510937604" APPENDIX E  PAGEREF _Toc510937604 \h 270  HYPERLINK \l "_Toc510937605" E.1 Checklists for Off-the-Shelf (OTS) Items  PAGEREF _Toc510937605 \h 270  HYPERLINK \l "_Toc510937606" E.2 Generic Software Safety Requirements From MSFC  PAGEREF _Toc510937606 \h 270  HYPERLINK \l "_Toc510937607" E.3 Design for Safety Checklist  PAGEREF _Toc510937607 \h 270  HYPERLINK \l "_Toc510937608" E.4 Checklist of generic (language independent) programming practices  PAGEREF _Toc510937608 \h 270  HYPERLINK \l "_Toc510937609" E.5 Checklist of assembly programming practices for safety  PAGEREF _Toc510937609 \h 270  HYPERLINK \l "_Toc510937610" E.6 Checklist of C programming practices for safety  PAGEREF _Toc510937610 \h 270  HYPERLINK 
\l "_Toc510937611" E.7 Checklist of C++ programming practices for safety  PAGEREF _Toc510937611 \h 270  HYPERLINK \l "_Toc510937612" E.8 Checklist of Fortran programming practices for safety  PAGEREF _Toc510937612 \h 270  HYPERLINK \l "_Toc510937613" E.9 Checklist of Pascal programming practices for safety  PAGEREF _Toc510937613 \h 270  HYPERLINK \l "_Toc510937614" E.10 Checklist for Visual Basic  PAGEREF _Toc510937614 \h 270  HYPERLINK \l "_Toc510937615" E.11 Checklist for selecting an RTOS  PAGEREF _Toc510937615 \h 270  HYPERLINK \l "_Toc510937616" E.12 Good Programming Practices Checklist  PAGEREF _Toc510937616 \h 270  HYPERLINK \l "_Toc510937617" E.13 Software Requirements Phase Checklist  PAGEREF _Toc510937617 \h 270  HYPERLINK \l "_Toc510937618" E.14 Architectural Design Phase Checklist  PAGEREF _Toc510937618 \h 270  HYPERLINK \l "_Toc510937619" E.15 Detailed Design Phase Checklist  PAGEREF _Toc510937619 \h 270  HYPERLINK \l "_Toc510937620" E.16 Implementation Phase Checklist  PAGEREF _Toc510937620 \h 270  HYPERLINK \l "_Toc510937621" E.17 Software Testing Phase Checklist  PAGEREF _Toc510937621 \h 270  HYPERLINK \l "_Toc510937622" E.18 Dynamic Testing Checklist  PAGEREF _Toc510937622 \h 270  HYPERLINK \l "_Toc510937623" E.19 Software System Testing Checklist  PAGEREF _Toc510937623 \h 270  Figures  TOC \f F \h \z \t "Table of Figures" \c  HYPERLINK \l "_Toc509733908" Figure 2-1 Hazard Analysis  PAGEREF _Toc509733908 \h 22  HYPERLINK \l "_Toc509733909" Figure 3-1 Elements of a Safety Process  PAGEREF _Toc509733909 \h 33  HYPERLINK \l "_Toc509733910" Figure 3-2 Relationship of Risk Indices  PAGEREF _Toc509733910 \h 42  HYPERLINK \l "_Toc509733911" Figure B-1: - SFTA Graphical Representation Symbols  PAGEREF _Toc509733911 \h 254  HYPERLINK \l "_Toc509733912" Figure B-2: - Example of High Level Fault Tree  PAGEREF _Toc509733912 \h 255  HYPERLINK \l "_Toc509733913" Figure B-3: - Example Code Fault Tree  PAGEREF _Toc509733913 \h 256  HYPERLINK \l "_Toc509733914" 
Figure C-1  PAGEREF _Toc509733914 \h 260  HYPERLINK \l "_Toc509733915" Figure C-2  PAGEREF _Toc509733915 \h 263  HYPERLINK \l "_Toc509733916" Figure D-1 Example of State Transition Diagram  PAGEREF _Toc509733916 \h 270  HYPERLINK \l "_Toc509733917" Figure D-2 Example RSM and Signals  PAGEREF _Toc509733917 \h 270  Tables  TOC \f T \h \z \t "Caption" \c  HYPERLINK \l "_Toc509733956" Table 2-1 Hazard Causes and Controls - Examples  PAGEREF _Toc509733956 \h 20  HYPERLINK \l "_Toc509733957" Table 2-2 Generic Hazards Checklist  PAGEREF _Toc509733957 \h 25  HYPERLINK \l "_Toc509733958" Table 2-3 Hazard Prioritization - System Risk Index  PAGEREF _Toc509733958 \h 27  HYPERLINK \l "_Toc509733959" Table 2-4 System Risk Level  PAGEREF _Toc509733959 \h 27  HYPERLINK \l "_Toc509733960" Table 3-1 NASA Software Life-cycle - Reviews and Documents  PAGEREF _Toc509733960 \h 34  HYPERLINK \l "_Toc509733961" Table 3-2 Software Subsystem Categories  PAGEREF _Toc509733961 \h 40  HYPERLINK \l "_Toc509733962" Table 3-3 Software Hazard Criticality Matrix  PAGEREF _Toc509733962 \h 41  HYPERLINK \l "_Toc509733963" Table 3-4 Software Hazard Risk Index  PAGEREF _Toc509733963 \h 41  HYPERLINK \l "_Toc509733964" Table 3-5 Required Software Safety Effort  PAGEREF _Toc509733964 \h 43  HYPERLINK \l "_Toc509733965" Table 3-6 Degree of Oversight vs. 
System Risk  PAGEREF _Toc509733965 \h 43  HYPERLINK \l "_Toc509733966" Table 3-7 Software Requirements Phase  PAGEREF _Toc509733966 \h 49  HYPERLINK \l "_Toc509733967" Table 3-8 Software Architectural Design Phase  PAGEREF _Toc509733967 \h 49  HYPERLINK \l "_Toc509733968" Table 3-9 Software Detailed Design Phase  PAGEREF _Toc509733968 \h 50  HYPERLINK \l "_Toc509733969" Table 3-10 Software Implementation Phase  PAGEREF _Toc509733969 \h 51  HYPERLINK \l "_Toc509733970" Table 3-11 Software Testing Phase  PAGEREF _Toc509733970 \h 52  HYPERLINK \l "_Toc509733971" Table 3-12 Dynamic Testing (Unit or Integration Level)  PAGEREF _Toc509733971 \h 53  HYPERLINK \l "_Toc509733972" Table 3-13 Software System Testing  PAGEREF _Toc509733972 \h 54  HYPERLINK \l "_Toc509733973" Table 4-1 Software Safety Documentation  PAGEREF _Toc509733973 \h 55  HYPERLINK \l "_Toc509733974" Table 4-2 Subsystem Criticality Analysis Report Form  PAGEREF _Toc509733974 \h 69  HYPERLINK \l "_Toc509733975" Table 5-1 Subsystem Criticality Matrix  PAGEREF _Toc509733975 \h 105  1. INTRODUCTION This NASA Software Safety Guidebook was prepared by the NASA Glenn Research Center, Office of Safety Assurance Technologies, under a Center Software Initiative Proposal (CSIP) task for the National Aeronautics and Space Administration. The NASA Software Safety Standard NASA-STD-8719.13A [1] prepared by NASA HQ addresses the who, what, when and why of Software Safety Analyses. This Software Safety Guidebook addresses the how to. 1.1 Scope The focus of this document is on analysis and development of safety critical software, including firmware (e.g. software residing in non-volatile memory, such as ROM, EPROM, or EEPROM) and programmable logic. This Guidebook provides information on development activities and analyses used in the creation and assurance of safety critical software. Resource data required for each task, methodologies and tools for performing the task, and the output products are detailed. 
It also describes how to address software safety in the overall software development, management, and risk management activities. This Guidebook goes on to describe techniques and procedures. Some techniques are well established and are illustrated in detail (or good reference sources are provided). Other techniques or analyses are new, and not much information is available. The Guidebook attempts to give a flavor of the technique or procedure as well as pointing to sources of more information. To make the guidebook more practical, it contains analysis examples and possible pitfalls and problems that may be encountered during the analysis process. It is a synergistic collection of techniques either already in use throughout NASA and industry, or which have potential for use. Opinions differ widely concerning the validity of the various techniques, and this Guidebook attempts to present these opinions, without prejudging their validity. In most cases there are few or no metrics to quantitatively evaluate or compare the techniques. Moreover, this Guidebook is meant not only to provide possible techniques and analyses, but to open the reader to how to think about software from a safety perspective. It is important to observe software development with a safety eye. This Guidebook points out things to look for (and look out for) in the development of safety critical software. Development approaches, safety analyses, and testing methodologies that lead to improved safety in the software product are included. Numerous existing documents provide details on various analysis techniques. If a technique is well described elsewhere, references are provided. If a NASA standard or guideline exists which defines the format and/or content of a specific document, it is referenced and the user should follow the instructions of that document. 
In addition to the existing techniques in the literature, some practical methods are presented which have been developed and used successfully at the system level for top-down software hazards analyses. Their approach is similar to NSTS 13830 Implementation Procedure for NASA Payload System Safety Requirements [2]. There are many different analysis techniques described in the open literature that are brought together, evaluated, and compared. This guidebook addresses the value added versus cost of each technique with respect to the overall software development and assurance goals. The reader is expected to have some familiarity with the NASA methodologies for system safety analysis and/or software development. However, no experience with either is assumed or required. Readers completely unfamiliar with NASA methodologies for software development and system safety may have difficulty with some portions of this guidebook. Acronyms and definitions of terminology used in this guidebook are contained in  HYPERLINK \l "_APPENDIX_A" Appendix-A. 1.2 Purpose The purpose of this guidebook is to aid organizations involved in the development and assurance of safety critical software (i.e. software developers, software managers, software assurance, system safety and software safety organizations). It is meant to help both system safety people who are unfamiliar with software, and the software development organization which is unfamiliar with safety. This guidebook focuses on software development and the tasks and analyses associated with it. Guidance on the acquisition of software, either commercial off-the-shelf or created under contract, is given in HYPERLINK \l "_7._SOFTWARE_ACQUISITION_3"Section 7. While the focus of this guidebook is on the development of software for safety-critical systems, much of the information and guidance is also appropriate to the creation of mission critical software. 
PROFESSIONAL SAFETY, April, 1995. 1.4 Associated Documents Documents detailing software safety standards, software development standards, and guidebooks are listed in  HYPERLINK \l "_7._REFERENCES" Section 8 References.
HYPERLINK \l "_2._SOFTWARE_SAFETY"Section 2 provides a preliminary look at the concepts of system safety, and how software relates to it. The section is written primarily for software developers, though system safety engineers will learn about various types of software that should be considered in a system safety context. HYPERLINK \l "_3._SOFTWARE_SAFETY_2"Section 3 gives a more in-depth look at software safety. In particular, guidance is provided on how to scope the safety effort and tailor the processes and analyses to the required level of effort. Details on the specific development processes and safety analyses are provided in the following sections.  HYPERLINK \l "_4._SAFETY_CRITICAL_1" Section 4 discusses development processes and techniques used by the software engineers (developers) while creating safety critical software. These techniques are described in detail, and are organized by the development lifecycle.  HYPERLINK \l "_5._SOFTWARE_SAFETY_1" Section 5 details the safety analyses to be performed, organized by the software development lifecycle. This section is written primarily for the safety engineer, though the software developer should be aware of what the analyses entail.  HYPERLINK \l "_6._Programming_Languages_1" Section 6 is a collection of specific problem areas that we felt should be addressed. Much of this section will be of interest to software developers. Safety engineers may wish to skim this section to obtain a better understanding of software. HYPERLINK \l "_7._SOFTWARE_ACQUISITION_3"Section 7 discusses the acquisition of software. Both COTS/GOTS (commercial and government off-the-shelf) software and software created under contract are considered.  HYPERLINK \l "_8._REFERENCES" Section 8 contains reference and resource information.  HYPERLINK \l "_APPENDIX_A" Appendix A provides definitions of commonly used terms and a list of acronyms. 
Safety is a team effort and is everyone's responsibility.
A hazard control is a method for preventing the hazard or reducing the risk of the hazard occurring.
For every hazard cause there must be at least one control method, where control method is usually a design feature (hardware and/or software), or a procedural step. Examples of hazard causes and controls are given in  HYPERLINK \l "table21" Table 2-1 Hazard Causes and Controls - Examples. Software faults can cause hazards and software can be used to control hazards. Some software hazard causes can be addressed with hardware hazard controls, although this is becoming less and less practical as software becomes more complex. For example, a hardwired gate array could be preset to look for certain predetermined hazardous words (forbidden or unplanned) transmitted by a computer, and shut down the computer upon detecting such a word. In practice, this is nearly impossible today because thousands of words and commands are usually sent on standard buses. 2.2 What Makes Software Hazardous? Software is hazardous if it Controls hazardous or safety critical hardware Monitors safety critical hardware as part of a hazard control Provides information upon which a safety-related decision is made Performs analysis that impacts automatic or manual hazardous operations Verifies hardware hazard controls Either remote or embedded real-time software that controls or monitors hazardous or safety critical hardware is considered hazardous. For example, software that controls an airlock or operates a high-powered laser is hazardous. Software which does not control or monitor a real world (real time or near real time) process is normally not hazardous. Software safety therefore concerns itself mostly with software that controls real world processes, whose malfunction can result in a hazard. However, other types of software can also be the subject of software safety. If the software resides with safety critical software on the same physical platform, it must also be considered safety critical unless adequately partitioned from the safety critical portion. 
These modeling programs are used to examine a variety of things from children's toys to rocket engines.
Therefore, users of software simulation programs that analyze safety critical hardware should be certified. All program input parameters such as loads, restraints, and materials properties, should be independently verified to assure proper modeling and analysis. 2.2.1 What is Safety Critical Software? Software that controls (activates, deactivates, etc.) functions which if performed or, if prevented from occurring, could result in injury to persons or damage to equipment is safety critical software. Software subsystem hazard analyses should be performed on any safety critical functions involving: a hazard cause a hazard control software providing information upon which safety critical decisions are made software used as a means of failure/fault detection Also, "contamination" of safety critical software by uncontrolled software sharing the same physical platform must be considered. Software that might not be safety critical in and of itself, could lock up the computer or write over critical memory areas when sharing a CPU or any routines with the safety critical software. 2.2.2 How Does Software Control Hazards? Many hardware hazard causes can be addressed with software hazard controls. From a historic standpoint hardware controls were preferred. But often it is not feasible to have only hardware controls, or sometimes to have any hardware controls at all. Increasingly, hazard responses are delegated to software because of the quick reaction time needed and as personnel are replaced by computers. For example, software can detect hazardous hardware conditions (via instrumentation) and execute a corrective action and/or warn operators. Software could detect what operational state a system is in and prevent certain activities which would be hazardous in that state, e.g. unlocking and opening a furnace door when the touch temperature is too high in the furnace on state. 2.2.3 What About Hardware Controls? 
NASA relies primarily on hardware controls, in conjunction with software controls, to prevent hazards. Often software is the first line of defense, monitoring for unsafe conditions and responding appropriately. The software may perform an automatic safing operation, or provide a message to a human operator, for example. The hardware control is the backup to the software control. If the software fails to detect the problem or does not respond properly to alleviate the condition, then the hardware control is triggered. Using a pressurized system as an example, the software monitors a pressure sensor. If the pressure goes over some threshold the software would respond by stopping the flow of gas into the system by closing a valve. If the software failed, either by not detecting the over-pressurization or by not closing the valve, then the hardware pressure relief valve would be used once the pressure reached a critical level. While software controls can be, and are, used to prevent hazards, they must be implemented with care. Special attention needs to be placed on this software during the development process.  HYPERLINK \l "_4.2.6_Formal_Inspections" Formal inspections of software controls is highly recommended. In addition, testing of the software control needs to be thorough and complete. For catastrophic hazards, NASA allows software to be only one of three hazard controls. In each case where software is a potential hazard cause or is utilized as a hazard control, the software is Safety Critical and should be analyzed, and its development controlled. 
When software is used to control a hazard, some care must be taken to isolate it from the hazard cause it is controlling.
If the hazard cause is a processor failure, then the hazard control must be located on another processor, since the failure would most likely affect its own software's ability to react to that CPU hardware failure.
This safety plan describes forms of analysis and provides a schedule for performing a series of these system and subsystem level analyses throughout the development cycle. It also addresses how safety analyses results and the sign-off and approval process should be handled. System safety program analyses follow the life cycle of the system development efforts. The system is comprised of the hardware, the software, and the interfaces between them (including human operators). What generally happens in the beginning of program development is that the hardware is conceived to perform the given tasks and the software concept is created that will operate and/or interact with the hardware. As the system develops and gains maturity, the types of safety analyses go from a single, overall assessment to ones that are more specific. While software is often considered a SUBSET of the complete system (a sub-system), it is actually a coexisting system, acting with and upon the hardware system. Software should always be considered in a systems context. Software often commands and monitors the system functions, as well as performing communications, data storage, sensor interpretation, etc., and is critical to even having a system!  Figure 2-1 Hazard Analysis The System Safety Program Plan should describe interfaces within the Assurance disciplines as well as the other Project disciplines. All analyses and tasks should be complementary and supportive, regardless of which group (development or assurance) has the responsibility. The analyses and tasks may be shared between the groups, and within each discipline, according to the resources and expertise of the project personnel. Concurrent engineering can help to provide better oversight, allowing information and ideas to be exchanged between the various disciplines, reduce overlapping efforts, and improve communications throughout the Project. 
Safety requirements can include those specific to NASA, the FAA, the Department of Transportation (DOT), and even the Occupational Safety and Health Administration (OSHA).
Any software hazard cause and software hazard controls or mitigation are taken forward as inputs to the software safety requirements flow-down process. System hazard controls should be traceable to system requirements. If controls identified by the PHA are not in the system specification, it should be amended by adding safety requirements to control the hazards. Then the process of flow-down from system specification to software specification will include the necessary safety requirements. At least one software requirement is generated for each software hazard control. Each requirement is incorporated into the Software Requirements Specification (SRS) as a safety critical software requirement. 2.4 Preliminary Hazard Analysis (PHA) An aerospace system generally contains three elements: hardware, software and one or more human operators (e.g., ground controllers, mission specialists or vehicle pilots). The hardware and software elements can be broken down further into subsystems and components. While individual elements, subsystems or components are often non-hazardous when considered in isolation, the combined system may exhibit various safety risks or hazards. This is especially true for software. Although it is often claimed that software cannot cause hazards, this is only true where the software resides on a non-hazardous platform and does not interface or interact with any hazardous hardware or with a human operator. Similarly, a hardware component such as an electrical switch or a fluid valve might be non-hazardous as a stand-alone component, but may become hazardous or safety critical when used as an inhibit in a system to control a hazard. Before any system with software can be analyzed or developed for use in hazardous operations or environment, a System PHA must be performed. Once initial system PHA results are available, safety requirements flow down and subsystem and component hazard analyses can begin. 
The PHA is the first source of specific software safety requirements, (i.e., unique to the particular system architecture). It is a prerequisite to performing any software safety analysis. It is important when doing a PHA to consider how the software interacts with the rest of the system. Software is the heart and brains of most modern, complex systems, controlling and monitoring almost all operations. When the system is decomposed into sub-elements, how the software relates to each component should be considered. The PHA should also look at how the component may feed back to the software (e.g. failed sensor leading the software to respond inappropriately). The PHA is the first of a series of system level hazard analyses, whose scope and methodology is described in the NASA NPG 8715.3 NASA Safety Manual [1], NSTS 13830 Implementation Procedure for NASA Payload System Safety Requirements [2], and NSTS-22254 Methodology for Conduct of Space Shuttle Program Hazard Analyses [3]. HYPERLINK \l "_2.4.3_Tools_and_1"Section 2.4.3 contains a list of resources that describe the process of performing a Preliminary Hazard Analysis. While this guidebook will not duplicate that information, the following sections summarize the methodology used in a PHA for software developers, managers, and others unfamiliar with NASA system safety. Note that the PHA is not a NASA-specific analysis, but is used throughout industry. IEEE 1228, Software Safety Plans, also requires that a PHA be performed. 2.4.1 PHA Approach The following is an excerpt from NPG 8715.3 Appendix-D: In many ways the PHA is the most important of the safety analyses because it is the foundation on which the rest of the safety analyses and the system safety tasks are built. It documents which generic hazards are associated with the design and operational concept. 
The results of the SSHAs in turn feed into the SHA, which will integrate its subsystems and identify hazards that cross the subsystem interfaces.
See  HYPERLINK \l "table22" Table 2-2 Generic Hazards Checklist for a sample checklist of generic hazards. The last row gives some examples of how software can function as a control for a hazard. It is important to understand that this and other checklists are merely tools to encourage the thought process. Keep thinking and brainstorming for all the permutations of potential hazards, causes, and controls for a given system. Table 2-2 Generic Hazards Checklist Generic HazardContamination / CorrosionElectrical Discharge / ShockEnvironmental / WeatherFire / ExplosionImpact / CollisionLoss of Habitable Environment*Pathological / Physiological/ PsychologicalRadiationTemperature Extremes Chemical Disassociation Chemical Replacement / Combination Moisture Oxidation Organic (Fungus, Bacterial, Etc.) Particulate Inorganic (Includes Asbestos)External Shock Internal Shock Static Discharge Corona ShortFog Lightning Precipitation (Fog, Rain, Snow, Sleet, Hail) Sand / Dust Vacuum Wind Temperature ExtremesChemical Change (Exothermic, Endothermic) Fuel & Oxidizer in Presence of Pressure and Ignition Source Pressure Release / Implosion High Heat SourceAcceleration (Including Gravity) Detached Equipment Mechanical Shock / Vibration / Acoustical Meteoroids / Meteorites Moving / Rotating EquipmentContamination High Pressure Low Oxygen Content Low Pressure Toxicity Low Temperature High TemperatureAcceleration / Shock / Impact / Vibration Atmospheric Pressure (High, Low, Rapid Change) Humidity Illness Noise Sharp Edges Sleep, Lack of Visibility (Glare, Surface Fogging) Temperature Workload, ExcessiveEMI Ionizing Radiation (Includes Radon) Non-ionizing Radiation (Lasers, Etc.)High Low VariationsSoftware Controls ExampleReceive data input from hardware sensors (gas chromatograph, particle detector, etc.). 
Activate caution and warning indicators if levels surpass programmed limits, and/or automatically shutdown sources or activate fans.Shut down power prior to access of electrical components.Receive data input from sensor readings of hardware devices (particle detector, wind velocity probe, etc.). Send commands to shut down hardware if programmed limits are surpassed.Monitor temperature, activate fire suppression system if temperature goes over set threshold.Monitor position of rotating equipment. Keep position within defined limits, or shutdown motion if exceeding limits.Receive data input from sensor readings of hardware devices. Send commands to operate proper sequencing of valve operation.Monitor pressure and rate of change. Control pressure system to keep rate of change under set limit.Receive data input from sensor readings of hardware devices. Shut down high gain antenna when operational time limit is reached.Monitor temperature. Sound warning if temperature outside of limits*Health issues require coordination with Occupational Health Personnel 2.4.1.1 Identifying Hazards Preliminary hazard analysis of the entire system is performed from the top down to identify hazards and hazardous conditions. Its goal is to identify all credible hazards up front. Initially the analysis is hardware driven, considering the hardware actuators, end effects and energy sources, and the hazards that can arise. For each identified hazard, the PHA identifies the hazard causes and candidate control methods. These hazards and hazard causes are mapped to system functions and their failure modes. Most of the critical functions are associated with one or more system controls. These control functions cover the operation, monitoring and/or safing of that portion of the system safety assessment and must consider the system through all the various applicable subsystems including hardware, software, and operators. 
To assure full coverage of all aspects of functional safety, it can be helpful to categorize system functions as two types: Must work functions (MWFs) Must not work functions (MNWFs) The system specification often initially defines the criticality (e.g., safety critical) of some system functions, but may be incomplete. This criticality is usually expressed only in terms of the Must-Work nature of the system function, and often omits the Must-Not-Work functional criticality. The PHA defines all the hazardous MNWFs as well as the MWFs. Examples: A science experiment might have a system Criticality designation of 3 (Non-critical) in terms of its system function, because loss of the primary experiment science data does not represent a hazard. However, the experiment might still be capable of generating hazards such as electric shock due to inadvertent activation of a power supply during maintenance. Activation of power during maintenance is a MNWF. An experiment might release toxic gas if negative pressure (vacuum) is not maintained. Maintaining a negative pressure is a MWF. An air traffic control system and aircraft flight control systems are designed to prevent collision of two aircraft flying in the same vicinity. Collision avoidance is a MWF. A spacecraft rocket motor might inadvertently ignite while it is in the STS Cargo Bay. Motor ignition is a MNWF, at that time. It is apparent that the MNWF becomes a MWF when it is time for the motor to fire. If functions identified in the PHA were not included in the system specification, it should be amended to address control of those functions. 2.4.1.2 Risk Levels The following definitions of hazard severity levels are from NASA NPG 8715.3. 
CatastrophicCriticalLoss of entire system; loss of ground facility; loss of human life or permanent disabilityMajor system damage; severe injury or temporary disabilityModerateNegligibleMinor system damage; minor injurySome system stress, but no system damage; no or minor injury Likelihood of occurrence are assigned probabilities that are determined by the project or program. The possibility that a given hazard may occur is based on engineering judgment. The following definitions of likelihood of occurrence are provided as an example only, and are based on NPG 8715.3 NASA Safety Manual.  LikelyProbableA one in ten chance (or greater) that the event will happen A one-in-hundred to one-in-ten chance that the event will occurPossibleUnlikelyImprobableGreater than a one in a thousand chance that the event will occurRare, less than one in a thousand chance the event will happenVery rare, possibility is like winning the lottery Hazards are prioritized by the system safety organization in terms of their severity and likelihood of occurrence, as shown in  HYPERLINK \l "table23" Table 2-3 Hazard Prioritization - System Risk Index. Table 2-3 Hazard Prioritization - System Risk Index Severity LevelsLikelihood of OccurrenceLikelyProbablePossibleUnlikelyImprobableCatastrophic11234Critical12345Moderate23456Negligible345671 = Highest Priority (Highest System Risk), 7 = Lowest Priority (Lowest System Risk) The hazard prioritization is important for determining allocation of resources and acceptance of risk. Hazards with the highest risk, Level 1, are not permitted in a system design. A system design exhibiting 1 for hazard risk level must be redesigned to eliminate the hazard. The lowest risk levels, "5" and above, require minimal, if any, safety analysis or controls. For the three levels of risk in-between, the amount of safety analysis required increases with the level of risk. 
The extent of a safety effort is discussed within HYPERLINK \l "_3._SOFTWARE_SAFETY_2"Section 3, where three levels of safety analysis are described, i.e. Minimum, Moderate and Full. These correspond to risk as follows: Table 2-4 System Risk Level System Risk LevelClass of Safety Analysis Recommended1Not Applicable (Prohibited)2Full3Moderate4,5*Minimum6,7None (Optional) *Level 5 systems fall between Minimum and Optional, and should be evaluated to determine the class of safety analysis required. 2.4.1.3 NASA Policy for Hazard Elimination/Control The NASA policy towards hazards of Risk Level 2, 3 or 4/5 is defined in NPG 8715.3 Section 3.4, as follows: Hazards will be mitigated according to the following stated order of precedence: Eliminate Hazards Hazards are eliminated where possible. This is accomplished through design, such as by eliminating an energy source. For example, software could have access to a pressure control. If software access to the control is not needed, and malfunctioning software could lead to a hazard, then preventing softwares access to the control eliminates the hazard. Design for Minimum Hazards Hazards that cannot be eliminated must be controlled. For those hazards, the PHA evaluates what can cause the hazards, and suggests how to control the potential hazard causes. Control by design is preferred. The hazard may be minimized by providing failure tolerance (e.g. by redundancy - series and/or parallel as appropriate), by providing substantial margins of safety, or by providing automatic safing. For example, software verifies that all conditions are met prior to ignition of rocket engines. Incorporate Safety Devices For example, a Fire Detection and Prevention system to detect and interact with a fire event. Software may be a part of these devices, and may also provide the trigger for the safety device. Provide Caution And Warning Devices Software may monitor a sensor and trigger the caution/warning. 
Any software used in these caution and warning devices is safety critical. Develop Administrative Procedures and Training Control by procedure is sometimes allowed, where sufficient time exists for a flight crew member or ground controller to perform a safing action. This concept of time to criticality was used in the NASA Space Station Freedom program. 2.4.2 Preliminary Hazard Analysis Process First, System and Safety experts examine the proposed system concepts and requirements and identify System Level Hazards considering areas such as power sources, chemicals usage, mechanical structures, time constraints, etc. (as per  HYPERLINK \l "_2.3.1.1_Identifying_Hazards" Section 2.4.1.1 Identifying Hazards above). Next, the hazard cause(s) are identified. Common hazard cause categories include: collision, contamination, corrosion, electrical shock/damage, explosion, fire, temperature extremes, radiation, illness/injury and loss of capability. Each hazard has at least one cause, such as a hardware component failure, operator error, or software fault. The PHA should identify some or all of the hazard causes, based on the system definition at that point in the development effort. Consideration of these categories as risks early in the development effort reduces the chance that any of these surface as a problem on a project. Next, at least one hazard control must be identified for each hazard cause. NASA safety standards often stipulate the methods of control required for particular hazard causes. This is not necessary for PHA, but is necessary at later phases in the system development process. Each control method must be a real feature, usually a design feature (hardware and/or software), or a procedural sequence and must be verifiable. For each hazard control at least one verification method must be identified. Verification can be by analysis, test or inspection. In some cases, the verification method is defined by established NASA safety requirements (e.g. 
Payload Safety Requirements NSTS 1700.1 (V1-B)). Each system hazard is documented in a Hazard Report. Required data items for a Hazard Report are described below. HYPERLINK \l "figure22"Figure 2-2 Payload Hazard Report Form, shows the NASA Shuttle / Station Payload Hazard Report, which identifies hazards, their causes, controls, verification methods and verification status. Detailed instructions for completing this form are given as an appendix when the form is downloaded from the NASA Payload Safety Homepage [ HYPERLINK "http://wwwsrqa.jsc.nasa.gov/pce" http://wwwsrqa.jsc.nasa.gov/pce] and also in NPG 8715.3 NASA Safety Manual, Chapter-3, System Safety, and Appendix-D (Analysis Techniques) [1]. This form is offered as a good example. NASA does not have an agency wide standard except SER Payloads at this time so each project or group may develop their own. These reports once created are revisited and updated on subsequent Safety Analyses throughout the life cycle of the system. A summary of the contents of the form is described below: Hazard Description This describes a system hazard, such as an uncontrolled release of energy resulting in a mishap. Safety Requirement This can be a hardware or software requirement and is usually a system level requirement. It can result in further flow-down to software functionality by identifying software Hazard Controls as described below. Hazard Cause This is usually a fault or defect in hardware or software. Software causes include: Failure to detect a problem Failure to perform a function Performing a function at the wrong time, out of sequence, or when the program is in the wrong state Performing the wrong function Performing a function incompletely Failure to pass along information or messages Hazard Control This is usually a design feature to control a hazard cause. The hazard control should be related to the applicable safety requirements cited by the hazard report. 
For example, where independence and fault tolerance are required, the hazard control block describes how the design meets these requirements. Some formats of hazard reports include a block to describe: Hazard Detection Method This is the means to detect imminent occurrence of a hazardous condition as indicated by recognizing unexpected values of measured parameters. In HYPERLINK \l "figure22"Figure 2-2 Payload Hazard Report Form it is implicitly included in the Hazard Control section. Safety Verification Method This identifies methods used to verify the validity of each Hazard Control. These methods include analysis, test, demonstration or inspection. Status of Verification This identifies scheduled or actual start and completion dates of each verification item, and if the item is open or closed at the time of writing. Since all required information is typically not available at the start of the development life-cycle, details for the various items are filled in and expanded during the development life-cycle. Hazard causes and controls are identified early in the process and verifications are addressed in later life-cycle phases. 2.4.3 Tools and Methods for PHA Tools and methods for performing a formal Preliminary Hazard Analysis are detailed in the following documents: NSTS 22254, Methodology for Conduct of Space Shuttle Program Hazard Analysis SOFTWARE SYSTEM SAFETY HANDBOOK, A Technical & Managerial Team Approach, December 1999 (Joint Software System Safety Committee) MIL-STD-882B, Task 201 (PHL) and Task 202 (PHA)  HYPERLINK "http://books.usapa.belvoir.army.mil/cgi-bin/bookmgr/BOOKS/P385_16/FIGFIGUNIQ10" http://books.usapa.belvoir.army.mil/cgi-bin/bookmgr/BOOKS/P385_16/FIGFIGUNIQ10 In addition, many system safety books describe the process of conducting a Preliminary Hazard Analysis. Figure 2-2 Payload Hazard Report Form PAYLOAD HAZARD REPORTa. NO:b. PAYLOAD:c. PHASE:d. SUBSYSTEM:e. HAZARD GROUP:f. DATE:g. HAZARD TITLE:i. 
HAZARD CATEGORY FORMCHECKBOX   FORMCHECKBOX CATASTROPHIC CRITICALh. APPLICABLE SAFETY REQUIREMENTS:  j. DESCRIPTION OF HAZARD: k. HAZARD CAUSES: l. HAZARD CONTROLS: m. SAFETY VERIFICATION METHODS: n. STATUS OF VERIFICATION: o. APPROVALPAYLOAD ORGANIZATIONSSP/ISSPHASE IPHASE IIPHASE IIIJSC Form 542B (Rev November 22, 1999) (MS Word September 1997) 2.4.4 PHA is a Living Document The PHA is not final because of the absence of design maturity. Later in the design process hazards may be added or deleted, and additional hazard analysis performed. But an initial set of hazards must be identified early in order to begin safety engineering tasks in a timely manner to avoid costly design impacts later in the process. The PHA is also required before software subsystem hazard analysis can begin. Those hazard causes residing in the software portion of a control system become the subject of the software subsystem hazard analysis. It is important to reexamine software's role and safety impact throughout the system development phases. Software is often relied on to work around hardware problems encountered, which results in additions and/or changes to functionality. 2.5 Software Subsystem Hazard Analysis After safety critical software is identified in the first cycle of the PHA, software hazard analysis can begin. The first cycle of system hazard analysis and software hazard analysis are top-down only. Bottom-up analyses take place after a sufficient level of design detail is available. The first cycle of bottom up analysis is Criticality analysis of requirements (as described in  HYPERLINK \l "_5.1.2_Requirements_Criticality" Section 5.1.2 Requirements Criticality Analysis). At this early phase, only the highest level of definition is available for functions that the software will perform and these may change as development proceeds. Many analysis and development techniques exist for safety critical software, and there is no one-size-fits-all approach. 
The extent of software safety effort required must be planned based on the degree of safety risk of the system. Hazards are prioritized by the system safety organization in terms of their severity and likelihood of occurrence, as shown in  HYPERLINK \l "table23" Table 2-3 Hazard Prioritization - System Risk Index. This system risk index identifies three levels of system safety effort. HYPERLINK \l "_3._SOFTWARE_SAFETY_2"Section 3 will expand upon this, and provide three levels of software safety effort, based on software's control of and responses to the hazardous hardware. HYPERLINK \l "_3._SOFTWARE_SAFETY_2"Section 3. SOFTWARE SAFETY PLANNING describes the levels of software safety effort, and presents guidelines on which analysis and development techniques may be selected for each. Strategies for tailoring the safety effort are also provided.  HYPERLINK \l "_4._SAFETY_CRITICAL_1" Section 4. SAFETY CRITICAL SOFTWARE DEVELOPMENT describes software development tasks and techniques in detail.  HYPERLINK \l "_5._SOFTWARE_SAFETY_1" Section 5. SOFTWARE SAFETY ANALYSIS provides many analysis tasks and techniques that can be used throughout the software development lifecycle. HYPERLINK \l "_6._Programming_Languages_1"Section 6. SOFTWARE DEVELOPMENT ISSUES discusses programming languages, tools, new technology, and programming practices from a safety point of view.  HYPERLINK \l "_7._SOFTWARE_ACQUISITION_3" Section 7. Software Acquisition discusses using off-the-shelf, previously developed, and contractor acquired software. 3. SOFTWARE SAFETY PLANNING The following section describes how to plan a software safety effort, and how to tailor it according to the risk level of the system. In  HYPERLINK \l "_2.3.1.2_Risk_Levels" Section 2.4.1.2 Risk Levels, determination of the level of safety risk inherent in the system was presented.  
HYPERLINK \l "_3.2_Scope_of" Section 3.2 Scope of Software Subsystem Safety Effort discusses tailoring the level of effort for both software development and software analysis tasks performed by software development personnel and software safety personnel. On the development side, the software safety engineer works in conjunction with the system safety organization to develop software safety requirements, distribute those safety requirements to the software developers, and monitor their implementation. On the analysis side, the software safety engineer analyzes software products and artifacts to identify new hazards and new hazard causes to be controlled, and provides the results to the system safety organization to be integrated with the other (non-software) subsystem analyses. Figure 3-1 Elements of a Safety Process The following sections of the guidebook will discuss the various analysis and development tasks used in the software development life cycle as depicted in a typical waterfall model. 3.1 Software Development Life-cycle Approach  HYPERLINK \l "table31" Table 3-1 NASA Software Life-cycle - Reviews and Documents shows the typical NASA software waterfall design life-cycle phases and lists the reviews and deliverable project documents required at each life-cycle phase. Each of these reviews and project documents should contain appropriate references and reports on software safety. Software safety tasks and documents for each design life-cycle phase are also shown. If a particular software safety task or document is defined elsewhere in this guidebook, the section number where it can be found is shown next to the name. Software development tasks and documents listed in the table will be described in the following subsections, organized by the life-cycle phase in which they are conducted or produced. In rapid prototyping environments, or spiral life cycle environments, software artifacts, including prototype codes, are created early (i.e. 
in requirements or architectural design phases). The early artifacts are evaluated and performance results used to modify the requirements. Then the artifacts are regenerated using the new requirements. This process may go through several iterations. In these environments, Safety Analyses are also done in several smaller iterative steps. Safety and software development must work closely together to coordinate the best time to perform each round of analysis. Table 3-1 NASA Software Life-cycle - Reviews and Documents Life-cycle PhasesMilestone ReviewsSoftware Safety TasksDocumentsSoftware Concept and Initiation (Project System and Subsystem Requirements and Design Development)SCR - Software Concept Review Software Management Plan Review Phase-0 Safety ReviewTailoring Safety Effort  HYPERLINK \l "_2.3_Preliminary_Hazard" Preliminary Hazard Analysis (PHA)  HYPERLINK \l "_2.3.1_PHA_Approach" PHA Approach Phase 0/1 Safety Reviews Hazards Tracking and Problem ResolutionSoftware Management Plan Software Systems Safety Plan Software Configuration Management Plan Software Quality Assurance Plan Risk Management PlanSoftware RequirementsSRR - Software Requirements Review Phase-1 Safety Review2.2 HYPERLINK \l "_4.2.1.1_Safety_Requirements"Safety Requirements Flow-down Generic Software Safety Requirements 4.2.1  HYPERLINK \l "_4.2.1_Development_of" Development of Software Safety Requirements 5.1  HYPERLINK \l "_5.1_Software_Safety" Software Safety Requirements Analysis 5.1.2  HYPERLINK \l "_5.1.2_Requirements_Criticality" Requirements Criticality Analysis 5.1.3  HYPERLINK \l "_5.1.3_Specification_Analysis" Specification Analysis HYPERLINK \l "_5.1.6_Software_Fault"Software Fault Tree Analysis (SFTA)  HYPERLINK \l "_5.1.5_Timing,_Throughput" Timing, Throughput, and Sizing AnalysisSoftware Requirements Document  HYPERLINK \l "_5.1.4_Formal_Inspections" Formal InspectionSoftware Architectural or Preliminary DesignSoftware Preliminary Design Review (PDR) Phase-1/2 Safety Review5.2.1 
 HYPERLINK \l "_5.2.1_Update_Criticality" Update Criticality Analysis 5.2.2  HYPERLINK \l "_5.2.2_Conduct_Hazard" Conduct Hazard Risk Assessment 5.2.3  HYPERLINK \l "_5.2.3_Analyze_Architectural" Analyze Architectural Design 5.2.4.1  HYPERLINK \l "_5.2.4.1_Interdependence_Analysis" Interdependence Analysis 5.2.4.2  HYPERLINK \l "_5.2.4.2__Independence" Independence Analysis  HYPERLINK \l "_5.2.8__Formal" Formal Methods and Model CheckingPreliminary Acceptance Test Plan Software Design Specification - (Preliminary) Formal InspectionSoftware Detailed DesignSoftware Critical Design Review (CDR) Phase-2 Safety Review HYPERLINK \l "_5.3.1_Design_Logic" Design Logic Analysis 5.3.2  HYPERLINK \l "_5.3.2_Design_Data" Design Data Analysis  HYPERLINK \l "_5.3.3_Design_Interface" Design Interface Analysis 5.3.4  HYPERLINK \l "_5.3.4_Design_Constraint" Design Constraint Analysis  HYPERLINK \l "_5.3.6_Dynamic_Flowgraph" Dynamic Flowgraph Analysis 5.3.13  HYPERLINK \l "_5.3.11_Requirements_State" Requirements State Machines  HYPERLINK \l "_5.3.13_Software_Failure" Software Failure Modes and Effects AnalysisFinal Acceptance Test Plan Software Design Specification - (Final) Formal Inspection ReportsSoftware Implementation (Code and Unit Test)Formal Inspections, Audits Phase-2/3 Safety Review5.4.1  HYPERLINK \l "_5.4.1_Code_Logic" Code Logic Analysis 5.4.3  HYPERLINK \l "_5.4.2_Code_Data" Code Data Analysis 5.4.4  HYPERLINK \l "_5.4.3_Code_Interface" Code Interface Analysis  HYPERLINK \l "_5.4.6_Formal_Code" 5.4.7 Code Inspections, Checklists, and coding standards  HYPERLINK \l "_5.4.8_Unused_Code" Unused Code Analysis  HYPERLINK \l "_5.4.9_Interrupt_Analysis" Interrupt AnalysisSource code listings Unit Test reports Formal Inspection ReportsSoftware Integration and TestTest Readiness Review Phase-3 Safety Review HYPERLINK \l "_4.6.1_Testing_Techniques" Testing Techniques  HYPERLINK \l "_4.6.6_Regression_Testing" Regression Testing HYPERLINK \l "_4.6.7_Software_Safety"Software 
Safety Testing  HYPERLINK \l "_4.6.6_Test_Witnessing" Test Witnessing  HYPERLINK \l "_5.5.1_Test_Coverage" Test Coverage  HYPERLINK \l "_5.5.5_Test_Results" Test Results Analysis  HYPERLINK \l "_5.5.3_Reliability_Modeling" Reliability ModelingTest Procedures Test Reports Problem/Failure Resolution ReportsSoftware Acceptance and DeliverySoftware Acceptance Review Phase-3 Safety ReviewSoftware Delivery Documents Acceptance Data PackageSoftware Sustaining Engineering and Operations(Same activities as for development)(Update of all relevant documents) 3.2 Scope of Software Subsystem Safety Effort In  HYPERLINK \l "_2.4.1.2_Risk_Levels" Section 2.4.1.2 Risk Levels, the System Risk Index was developed. This index specified the hazard risk for the system as a whole. The software element of the system inherits from the system risk, modified by the extent with which the software controls, mitigates, or interacts with the hazardous elements. Merging these two aspects is not an exact science, and the information presented in this section is meant to guide the tailoring of the safety effort. Use intelligence, and not blind adherence to a chart, when determining the level of safety effort. Begin the process of tailoring the software safety effort by determining how much software is involved with the hazardous aspects of the system. Search the PHA, Software Hazard Analysis, Software Risk Assessments, and other analyses for systems and sub-systems that can be initially categorized as safety critical or not. Then look at how the software relates to the safety critical system components. Tailoring the software safety effort can be accomplished by following three steps: 1. Identify safety critical software 2. Categorize safety critical software subsystems (i.e., how critical is it?)3. 
Determine the extent of development effort and oversight required 3.2.1 Identify Safety Critical Software Before deciding how to apply software safety techniques to a project, it is important to first understand the many factors which determine if there even is a safety concern. The first determination to be made is whether there is any software in the system which either monitors, controls, interfaces with directly, or is resident in a processor handling critical or hazardous system functions. Other safety critical software performs analysis or crunches numbers that will be used with, or for, safety critical equipment. Note that non-safety critical software becomes safety critical if it can impact safety critical software resident with it (on the same machine). Next it is important to determine the reliability of the system. Most aerospace products are built up from small, simple components to make large, complex systems. Electronic components usually have a small number of states and can be tested exhaustively, due to their low cost, to determine their inherent reliability. (Mechanical components are often another matter.) Reliability of a large hardware system is determined by developing reliability models using failure rates of the components that make up the system. Reliability and safety goals of hardware systems can usually be reached through a combination of redundant design configurations and selection of components with suitable reliability ratings. Reliability of software is much harder to determine. Software does not wear out or break down but may have a large number of states that cannot be fully tested. An important difference between hardware and software is that many of the mathematical functions implemented by software are not continuous functions, but functions with an arbitrary number of discontinuities [4]. 
Although mathematical logic can be used to deal with functions that are not continuous, the resulting software may have a large number of states and lack regularity. It is usually impossible to establish reliability values and prove correctness of design by testing all possible states of a medium to large (more than 40,000-50,000 lines of code) software system within a reasonable amount of time, if ever. Furthermore, testing can only commence after preliminary code has been generated, typically late in the development cycle. As a result, it is very difficult to establish accurate reliability and design correctness values for software. If the inherent reliability of software cannot be accurately measured or predicted, and most software designs cannot be exhaustively tested, the level of effort required to meet safety goals must be determined using other characteristics of the system. The following characteristics have a strong influence on the ability of software developers to create reliable safe software: Degree of Control The degree of control that the software exercises over safety-critical functions in the system. Software which can cause a hazard if it malfunctions is considered safety critical software. Software which is required to either recognize hazardous conditions and implement automatic safety control actions, or which is required to provide a safety critical service, or to inhibit a hazardous event, will require more software safety resources and detailed assessments than software which is only required to recognize hazardous conditions and notify a human operator to take necessary safety actions. Human operators must then have redundant sources of data independent of software, and can detect and correctly react to misleading software data before a hazard can occur. Fatal accidents have occurred involving poorly designed human computer interfaces, such as the Therac-25 X-ray machine [2]. 
In cases where an operator relies only on software monitoring of critical functions, a complete safety effort is required (e.g., might require monitoring via two or more separate CPUs and resident software with voting logic). Complexity The complexity of the software system. Greater complexity increases the chances of errors. The number of safety related software requirements for hazards control increases software complexity. Some rough measures of complexity include the number of subsystems controlled by software and the number of interfaces between software/hardware, software/user and software/software subsystems. Interacting, parallel executing processes also increase complexity. Note that quantifying system complexity can only be done when a high level of design maturity exists (i.e., detail design or coding phases). Software complexity can be estimated based on the number and types of logical operations it performs. Several automated programs exist to help determine software's complexity. These should be used if possible; however, the results should be used as guidelines only. Timing criticality The timing criticality of hazardous control actions. A system with software control of hazards that have fast reaction times, or a system that has a slow response time, will require more of a software safety assurance effort. For example, spacecraft that travel beyond Earth orbit have a turnaround time spent notifying a ground human operator of a possible hazard and waiting for commands on how to proceed that may exceed the time it takes for the hazard to occur. If the software does not meet any of the above criteria, then it is probably not safety critical. Having determined the software is safety critical, the next questions are "how critical?" and "what are the risks?". 
Categorize Safety Critical Software Subsystems Once an initial determination about safety criticality has been made, further analyses (such as the FMEA or FTA) will help to discover, confirm, or diminish the criticality rating. As the design progresses, it is essential to scope the software effort up front based on the initial analysis, then adjust as needed, as more information becomes available. Analysis is an iterative process. The first pass, the PHA or initial Software Hazard Analysis, shows where to start focusing attention. Each successive analysis (requirements, design, code) indicates where to focus both development and further analysis. 3.2.2.1 Software Control Categories Categorizing software that controls safety critical functions in the system is based on the degree of control the software exercises over the functions. Software that can cause a hazard if it malfunctions is included in the category of high risk software. Software which is required to either recognize hazardous conditions and implement automatic safety control actions, or which is required to provide a safety critical service, or to inhibit a hazardous event, will require more software safety resources and detailed assessments. Software which is required to recognize hazardous conditions and notify a human operator to take necessary safety actions will not require as rigorous a safety effort as the former examples. This example assumes that the human operator has redundant sources of data, independent of software, and can detect and correctly react to misleading software data before a hazard can occur. Adequate hardware safety features to prevent hazards will also move the software from high risk to lower risk, and require a less rigorous safety program. In cases where an operator relies only on software monitoring of critical functions, complete safety effort is required. 
A reference source of definitions for software control categories is from an older version of MIL-STD-882, specifically revision C. MIL-STD-882C [4] has been replaced by MIL-STD-882D, which does not reference the software categories. MIL-STD-882C categorized software according to their degree of control of the system, as described below: Software exercises autonomous control over potentially hazardous hardware systems, subsystems or components without the possibility of intervention to preclude the occurrence of a hazard. Failure of the software, or a failure to prevent an event, leads directly to a hazard's occurrence. Software exercises control over potentially hazardous hardware systems, subsystems, or components allowing time for intervention by independent safety systems to mitigate the hazard. However, these systems by themselves are not considered adequate. Software item displays information requiring immediate operator action to mitigate a hazard. Software failures will allow or fail to prevent the hazard's occurrence. Software item issues commands over potentially hazardous hardware systems, subsystems or components requiring human action to complete the control function. There are several, redundant, independent safety measures for each hazardous event. Software generates information of a safety critical nature used to make safety critical decisions. There are several redundant, independent safety measures for each hazardous event. Software does not control safety critical hardware systems, subsystems or components and does not provide safety critical information. Complexity increases the possibilities of errors, an important point to consider. Increasing errors lead to the possibility of fault, which leads to failures. The following chart takes into consideration the complexity of the software when categorizing the software. 
The chart also relates back to the system risk index discussed in  HYPERLINK \l "_2.3.1.2_Risk_Levels" Section 2.4.1.2 Risk Levels and has already eliminated level 1 (prohibited) and level 5 (minimal risk). At this point the software category links the complexity of the software, the control which the software exerts on a system, and the system risk index. The links will be important in creating a Software Hazard Criticality Matrix. Table 3-2 Software Subsystem Categories Software CategoryDescriptionsIAPartial or total autonomous control of safety critical functions by software.(System Risk Index 2)Complex system with multiple subsystems, interacting parallel processors, or multiple interfaces.Some or all safety-critical functions are time critical.IIA & IIB*Control of hazard but other safety systems can partially mitigate. Detects hazards, notifies human operator of need for safety actions.(System Risk Index 3)Moderately complex with few subsystems and/or a few interfaces, no parallel processing.Some hazard control actions may be time critical but do not exceed time needed for adequate human operator response.IIIA & III B*Several mitigating systems prevent hazard if software malfunctions. No safety critical data generated for a human operator.(System Risk Index 4)Simple system with only 2-3 subsystems, limited number of interfaces.Not time-critical.Note: System risk index number is taken from Table 2-3 Hazard Prioritization - System Risk Index * A = software control of hazard. B = Software generates safety data for human operator 3.2.2.2 Software Hazard Criticality Matrix The Software Hazard Criticality Matrix is established using the hazard categories for the columns and the Software Control Categories for the rows. The matrix relates how involved the software is in controlling a hazard with how bad the hazard is. 
A Software Hazard Risk Index is assigned to each element of the matrix, just as System Risk Index numbers are assigned in the  HYPERLINK \l "table23" Hazard Prioritization - System Risk matrix. Unlike the System Risk Index, a low index number from the Software Hazard Criticality Matrix does not mean that a design is unacceptable. Rather, it indicates that greater resources need to be applied to the analysis and testing of the software and its interaction with the system. Table 3-3 Software Hazard Criticality Matrix Software Control CategoryHazard CategoryCatastrophicCriticalModerateNegligible / MarginalIA (System Risk Index 2)1135IIA & IIB (System Risk Index 3)1245IIIA & IIIB (System Risk Index 4)2355IV (System Risk Index 5)3455Note: System risk index number is taken from Table 2-3 Hazard Prioritization - System Risk Index The interpretation of the Software Hazard Risk Index is given in  HYPERLINK \l "table34" Table 3-4. The level of risk relates directly to the amount of analysis and testing that should be applied to the software. Table 3-4 Software Hazard Risk Index Software Hazard Risk IndexSuggested Criteria1High Risk: significant analysis and testing resources2Medium Risk: requirements and design analysis and in-depth testing required3-4Moderate Risk: high level analysis and testing acceptable with management approval5Low Risk: acceptable  HYPERLINK \l "figure32" Figure 3-2 shows the relationships among the various risk indices and software criteria. The System Risk Index feeds into the Software Risk Index, modified by the software categories. The modification relates to how much control the software has over the hazard, either potentially causing it or in controlling/mitigating the hazard. Note that the Software Risk Index relates to only a subset of the System Risk Index, because the riskiest level (lowest System Index number) is prohibited, and the levels with the least system risk do not require a safety effort. 
Figure 3-2 Relationship of Risk Indices 3.2.3 Determine the Development Effort and Oversight Required 3.2.3.1 Determine Extent of Effort The level of required software safety effort for a system shown in HYPERLINK \l "table35"Table 3-5 Required Software Safety Effort, is determined by its Software Hazard Risk Index as shown in HYPERLINK \l "table33"Table 3-3. The mapping is essentially: Index 1 = full effort, 2 and 3 = moderate effort, and 4/5 = minimum effort. However, if your risk index is a two, consider whether you are a high two (closer to level one more risk). If so, your safety effort should be the full safety effort, or somewhere between full and moderate. Also, if your Risk Index is a high 4, then the safety effort falls into the moderate category. Another difference between the tables is with category IV software, which does not participate in any hazardous functions. Normally, no safety effort would be needed for such software. However, with a catastrophic and critical hazards, non-safety critical software should be evaluated for possible failures and unexpected behavior that could lead to the hazard. Further definition of what is meant by Full, Moderate, and Minimum software safety effort will be discussed in HYPERLINK \l "_3.2.3.3_Tailoring_the"  Section 3.2.3.3. Table 3-5 Required Software Safety Effort Software CategoryHazard Severity Level from Section 2.3.1.2See Table 3-3CatastrophicCriticalModerateNegligible / MarginalIA (Software Risk Index 1)FullFullModerateMinimumIIA & IIB (Software Risk Index 2/3)FullModerateMinimumMinimumIIIA & IIIB (Software Risk Index 4/5)ModerateModerateMinimumMinimumIV Software does not directly control hazardous operations.MinimumMinimumNoneNone  3.2.3.2 Oversight Required The level of software quality assurance and independent oversight required for safety assurance depends on the system risk index as follows: Table 3-6 Degree of Oversight vs. 
System Risk Software Risk IndexSystem Risk IndexDegree of Oversight1Not applicable (Prohibited)12Fully independent IV & V organization, as well as in-house SA23In house SA organization; Possible software IA NOTEREF _Ref509393694 \f \* MERGEFORMAT 134In house SA organization4,55-7Minimal in house Software Assurance (SA) The level of oversight is for safety purposes, not for mission success. Oversight for mission success purposes can be greater or less than that required for safety. A full-scale software development effort is typically performed on a safety critical flight system, (e.g. a manned space vehicle, or high value one-of-a-kind spacecraft or aircraft, critical ground control systems, critical facilities, critical ground support equipment, unmanned payloads on expendable vehicles, etc.). Other types of aerospace systems and equipment often utilize less rigorous development programs, such as non-critical ground control systems, non-critical facilities, non-critical ground support equipment, non-critical unmanned payloads on expendable vehicles, etc. In those cases, subsets of the milestone reviews and software safety development and analysis tasks can be used. 3.2.3.3 Tailoring the Effort Once the scope of the software safety effort has been determined, it is time to tailor it to a given project or program. The safety activities should be sufficient to match the software development effort and yet ensure that the overall system will be safe. The scope of the software development and software safety effort is dependent on risk. Software safety tasks related to the life-cycle phases and milestone reviews are listed in  HYPERLINK \l "table31" Table 3-1 of this guidebook. The tasks that will be performed are dependent on the risks associated with the software. 
If your system will include off-the-shelf software (COTS, GOTS), reused software from another project, or software developed by a contractor, refer to HYPERLINK \l "_7._SOFTWARE_ACQUISITION_3"Section 7 Software Acquisition. This section discusses the risks of the various types of acquired software. Additional analyses and tests may need to be performed, depending on the criticality of the acquired software and the level of knowledge you have about how it was developed and tested. The level of safety effort may be higher if the COTS/reused/contracted software is safety critical itself or interacts with the safety critical software. This guidebook describes tasks, processes, methodologies, and reporting or documentation required for a full-fledged, formal software safety program for use on a large, software-intensive project. A project may choose to implement all the activities described in those sections or, based upon information presented in this section, may choose to tailor the software safety program. At the very minimum, a project must review all pertinent specifications, designs, implementations, tests, engineering change requests, and problem/failure reports to determine if any hazards have been inadvertently introduced. Software quality assurance activities should always verify that all safety requirements can be traced to specific design features, to specific program sets/code modules, and to specific tests conducted to exercise safety control functions of software (See  HYPERLINK \l "_5.1.1_Software_Safety" Section 5.1.1 Software Safety Requirements Flow-down Analysis). Sections 3.2.3.3.1 through 3.2.3.3.3 below help determine the appropriate methods for software quality assurance, software development and software safety for full, moderate, and minimum safety efforts. 
Ultimately, the categorization of a project's software and the range of selected activities must be negotiated and approved by project management, software development, software quality assurance, and software systems safety personnel together. This may require educating various team members about software safety as well as knowledgeable negotiators. 3.2.3.3.1 Full Software Safety Effort Systems and subsystems that have severe hazards which can escalate to major failures in a very short period of time require the greatest level of safety effort. Some examples of these types of systems include life support, fire detection and control, propulsion/pressure systems, power generation and conditioning systems, and pyrotechnics or ordnance systems. These systems may require a formal, rigorous program of quality and safety assurance to ensure complete coverage and analysis of all requirements, design, code, and tests. Safety analyses, S/W analyses, safety design features, and Software Assurance (SA) oversight are highly recommended. In addition, Independent Verification and Validation (IV&V) activities may be required. Each project, regardless of its level of safety criticality, must perform an IV&V evaluation at the beginning of the project, and whenever the project changes significantly. NASA NPG 8730 describes the process and responsibilities of all parties. IV&V provides for independent evaluation of the project software, including additional analyses and tests performed by the IV&V personnel. This is in addition to any analyses and tests performed by the project SA. 3.2.3.3.2 Moderate Software Safety Effort Systems and subsystems which fall into this category typically have either a limited hazard potential or, if they control serious hazards, the response time for initiating hazard controls to prevent failures is long enough to allow for notification of human operators and for them to respond to the hazardous situation. 
Examples of these types of systems include microwave antennas, low power lasers, and shuttle cabin heaters. These systems require a rigorous program for safety assurance of software identified as safety-critical. Non-safety-critical software must be regularly monitored to ensure that it cannot compromise the safety-critical portions of the software. Some analyses may be needed to assure there are no undiscovered safety critical areas, and some S/W design safety features may be required. Some level of Software Assurance oversight is still needed to assure late design changes don't affect the safety category. All NASA projects, regardless of their level of safety criticality, must perform an IV&V evaluation at the beginning of the project, and whenever the project changes significantly. NASA NPG 8730 describes the process and responsibilities of all parties. While a project of this level may require IV&V, it is more likely to require an Independent Assessment. From NPG 8730, Software independent assessment (IA) is defined as "a review of and analysis of the program/project's system software development lifecycle and products." The IA differs in scope from a full IV&V program in that IV&V is applied over the lifecycle of the system whereas an IA is usually a one-time review of the existing products and plans. In many ways, IA is an outside audit of the project's development process and products (documentation, code, test results, etc.). 3.2.3.3.3 Minimum Software Safety Effort For systems in this category, either the inherent hazard potential of a system is very low or control of the hazard is accomplished by non-software means. Failures of these types of systems are primarily reliability concerns. This category may include such things as scan platforms and systems employing hardware interlocks and inhibits. 
Software development in these types of systems must be monitored on a regular basis to ensure that safety is not inadvertently compromised, and that no features or functions are added which would make the software safety critical. A formal program of software safety is not usually necessary. Good development practices and Software Assurance are still necessary, however. Consider implementing some of the development activities in  HYPERLINK \l "_4._SAFETY_CRITICAL_1" Section 4 and the analyses in  HYPERLINK \l "_5._SOFTWARE_SAFETY_1" Section 5. Many of these activities provide increased reliability as well as safety, and for a minimal effort. They help assure mission success. As for the other safety levels, all NASA projects must perform an IV&V evaluation at the beginning of the project, and whenever the project changes significantly. NASA NPG 8730 describes the process and responsibilities of all parties. A project at this level is unlikely to require either IV&V or IA. 3.2.3.3.4 Match the Safety Activities to Meet the Development Effort During the software development process the development organization will be creating many products (documents, designs, code, etc.). The safety organization, as part of this process, will monitor and perform its own analysis, inspection, and review of the generated products with a safety slant. Prior to this, the question that should be asked is, "How much work will these activities entail?" That is, for the type of development process required, "Which safety activities are required, and how much of each activity is necessary?" All of this is dependent on the amount of risk as stated in the previous tables. Whether the effort is large or small, whether it is the entire project or just a sub-system, no matter the complexity, the effort will still require planning. The software safety activities which will need tailoring are as follows: AnalysisThere are many types of analyses which can be completed during software development. 
Every phase of the life-cycle can be affected by increased analysis as a result of safety considerations. The analyses can range from Requirements Criticality Analysis to Software Fault Tree Analysis of the design to Formal Methods.InspectionsInspections can take place in a number of settings and with varying products (requirements to test plans). Again, the number of inspections and products is dependent on the risk related to the system.ReviewsThe number of formal reviews and the setting up of delta reviews can be used to give the organization more places to look at the products as they are being developed.Verification & ValidationVerification tests that the system was built right (according to the requirements). Validation checks that the right system was built (truly meets the needs and intent of the customer). V&V primarily involve testing of the software system, though it encompasses analyses, inspections, and reviews as well. The amount of testing can be tailored, with safety critical units and subsystems receiving the majority of the test effort. Traceability from the requirements, through design and coding, and into test is an important part of V&V. Development and designing in safety features such as firewalls, arm-fire commanding, etc. depends on where it is best applied and needed. The degree to which each of these activities are performed is related to risk and in turn to the software safety effort table just reviewed. 3.3 Incorporating Software Safety into Software Development This section provides software safety guidance for planning both development and analysis tasks during different life cycle phases. Specific details pertaining to the performance and implementation of both tasks are discussed later in HYPERLINK \l "_4._SAFETY_CRITICAL_1"Section 4. SAFETY CRITICAL SOFTWARE DEVELOPMENT and HYPERLINK \l "_5._SOFTWARE_SAFETY_2"Section 5. SOFTWARE SAFETY ANALYSIS. 
There are many software engineering and assurance techniques, which can be used to control software development and result in a high-quality, reliable, and safe product. This section provides lists of recommended techniques, which have been used successfully by many organizations. Software developers may employ several techniques for each development phase, based on a project's required level of software safety effort. Other techniques, which are not listed in these tables, may be used if they can be shown to produce comparable results. Ultimately, the range of selected techniques must be negotiated and approved by project management, software development, software quality assurance, and software systems safety.  HYPERLINK \l "table37" Table 3-7 Software Requirements Phase through  HYPERLINK \l "table313" Table 3-13 Software System Testing are modifications of tables that appear from an early International Electrotechnical Committee (IEC) draft standard IEC 1508, "Software For Computers In The Application Of Industrial Safety-Related Systems" [5]. This document is currently under review by national and international representatives on the IEC to determine its acceptability as an international standard on software safety for products which contain Programmable Electronic Systems (PESs). This set of tables is a good planning guide for software safety. These tables provide guidance on the types of assurance activities which may be performed during the life-cycle phases of safety-critical software development. For this guidebook, the Software Safety Effort level, as determined from  HYPERLINK \l "table35" Table 3-5 Required Software Safety Effort, will determine which development activities are required for a particular project. Each of the following tables lists techniques and recommendations for use based on safety effort level for a specific software development phase or phases. 
The recommendations are coded as: Recommendations Codes(Mandatory((Highly Recommended(Recommended(Not Recommended Most of the "Not Recommended" entries result from consideration of time and cost in relation to the required level of effort and the expected benefits. A mixture of entries marked as "Recommended" may be performed if extra assurance of safety or mission success is desired. "Highly Recommended" entries should receive serious consideration for inclusion in system development. If not included, it should be shown that safety is not compromised. In some cases the tables in this guidebook take a more conservative view of applicability of the techniques than the original IEC tables. Each project and each organization is different. The final list of techniques to be used on any project should be developed jointly by negotiations between project management and safety assurance. Software developers should be included as well, since they will be the ones who must follow the agreed upon plan. All the following tables,  HYPERLINK \l "table37" Table 3-7 Software Requirements Phase through HYPERLINK \l "table313"  Table 3-13 Software System Testing, list software development, safety and assurance activities which should be implemented in the stated phases of development.  HYPERLINK \l "_APPENDIX_E" Appendix E contains forms that can be used to create lists of activities to be completed for each phase. 
Table 3-7 Software Requirements Phase TechniqueSafety Effort LevelMINMODFULL2.4  HYPERLINK \l "_2.3_Preliminary_Hazard" Preliminary Hazard Analysis (PHA)(((5.1.1  HYPERLINK \l "_5.1.1_Software_Safety" Software Safety Requirements Flow-down Analysis(((((5.1.1.1 HYPERLINK \l "_5.1.1.1_Checklists_and"  Checklists and cross references((((((5.1.2  HYPERLINK \l "_5.1.2_Requirements_Criticality" Requirements Criticality Analysis((((4.2.2  HYPERLINK \l "_4.2.2__Generic" Generic Software Safety Requirements((((5.1.3  HYPERLINK \l "_5.1.3_Specification_Analysis" Specification Analysis((((4.2.3  HYPERLINK \l "_4.2.4_Formal_Methods" Formal Methods - Specification Development(((((4.2.5  HYPERLINK \l "_4.2.6_Formal_Inspections" Formal Inspections of Specifications(((5.1.5  HYPERLINK \l "_5.1.5_Timing,_Throughput" Timing, Throughput And Sizing Analysis((((5.1.6  HYPERLINK \l "_5.1.6_Software_Fault" Software Fault Tree Analysis((((Table 3-8 Software Architectural Design Phase TechniqueSafety Effort LevelMINMODFULL4.3.3  HYPERLINK \l "_4.3.2_Selection_of" COTS and software reuse examination 7.1  HYPERLINK \l "_7.1_Off-the-Shelf_Software" Off-the-Shelf Software((((4.3.4  HYPERLINK \l "_4.3.3_Selection_of" Selection of programming language, environment, tools, and operating system((((4.3.5  HYPERLINK \l "_4.3.5_Coding_Standards" Coding Standards(((5.2.1  HYPERLINK \l "_5.2.1_Update_Criticality" Update Criticality Analysis((((5.2.2  HYPERLINK \l "_5.2.2_Conduct_Hazard" Conduct Hazard Risk Assessment((((5.2.3  HYPERLINK \l "_5.2.3_Analyze_Architectural" Analyze Architectural Design((((5.2.4.1  HYPERLINK \l "_5.2.4.1_Interdependence_Analysis" Interdependence Analysis((((5.2.4.2 HYPERLINK \l "_5.2.4.2__Independence"  Independence Analysis((((5.2.5  HYPERLINK \l "_5.2.5_Update_Timing," Update Timing/Throughput/Sizing Analysis((((5.2.6 HYPERLINK \l "_5.2.6_Update_Software"  Update Software Fault Tree Analysis((((5.2.7  HYPERLINK \l "_5.2.7_Formal_Inspections" Formal Inspections of 
Architectural Design Products((((5.2.8  HYPERLINK \l "_5.2.8__Formal" Formal Methods and Model Checking(((( Table 3-9 Software Detailed Design Phase TechniqueSafety Effort LevelMINMODFULL4.2.4  HYPERLINK \l "_4.2.5__Model" Model Checking((((5.3.1  HYPERLINK \l "_5.3.1_Design_Logic" Data Logic Analysis(((5.3.2  HYPERLINK \l "_5.3.2_Design_Data" Design Data Analysis((((5.3.3 HYPERLINK \l "_5.3.3_Design_Interface"  Design Interface Analysis((((5.3.4  HYPERLINK \l "_5.3.4_Design_Constraint" Design Constraint Analysis((((5.3.5  HYPERLINK \l "_5.3.5_Design_Functional" Design Functional Analysis((((5.3.6  HYPERLINK \l "_5.3.6_Software_Element" Software Element Analysis((((5.3.7 HYPERLINK \l "_5.3.7_Rate_Monotonic"Rate Monotonic Analysis((((5.3.8 HYPERLINK \l "_5.3.8_Dynamic_Flowgraph"Dynamic Flowgraph Analysis((((5.3.9 HYPERLINK \l "_5.3.9_Markov_Modeling"Markov Modeling((((5.3.10 HYPERLINK \l "_5.3.10_Measurement_of"Measurement of Complexity(((((5.3.11 HYPERLINK \l "_5.3.11_Selection_of"Selection of Programming languages((((5.3.12 HYPERLINK \l "_5.3.12_Formal_Methods"Formal Methods and Model Checking ((((5.3.13 HYPERLINK \l "_5.3.13_Requirements_State"Requirements State Machines((((5.3.14 HYPERLINK \l "_5.3.14_Formal_Inspections"Formal Inspections of Detailed Design Products(((5.3.15 HYPERLINK \l "_5.3.15_Software_Failure"Software Failure Modes and Effects Analysis((((5.3.16 HYPERLINK \l "_5.3.16_Updates_to"Updates to Previous Analyses (SFTA, Timing, Criticality, etc.)((((( Table 3-10 Software Implementation Phase TechniqueSafety Effort LevelMINMODFULL4.5.1  HYPERLINK \l "_4.5.1_Coding_Checklists" Coding Checklists(((4.5.2  HYPERLINK \l "_4.5.2_Defensive_Programming" Defensive Programming((((4.5.3  HYPERLINK \l "_4.5.3_Refactoring" Refactoring(((((5.4.1  HYPERLINK \l "_5.4.1_Code_Logic" Code Logic Analysis((((5.4.2  HYPERLINK \l "_5.4.2_Code_Data" Code Data Analysis((((5.4.3  HYPERLINK \l "_5.4.3_Code_Interface" Code Interface Analysis((((5.4.4  HYPERLINK \l 
"_5.4.4_Update_Measurement" Update Measurement of Complexity((((5.4.5  HYPERLINK \l "_5.4.5_Update_Design" Update Design Constraint Analysis((((5.4.6  HYPERLINK \l "_5.4.6_Formal_Code" Formal Code Inspections, Checklists, and Coding Standards((((5.4.7  HYPERLINK \l "_5.4.7_Applying_Formal" Formal Methods(((((5.4.8  HYPERLINK \l "_5.4.8_Unused_Code" Unused Code Analysis((((5.4.9 HYPERLINK \l "_5.4.9_Interrupt_Analysis"  Interrupt Analysis((((5.4.10  HYPERLINK \l "_5.4.10__Final" Final Timing, Throughput, and Sizing Analysis((((5.4.11  HYPERLINK \l "_5.4.11_Program_Slicing" Program Slicing(((((5.4.12  HYPERLINK \l "_5.4.12_Update_Software" Update Software Failure Modes and Effects Analysis(((( Table 3-11 Software Testing Phase TechniqueSafety Effort LevelMINMODFULL4.5.4  HYPERLINK \l "_4.5.4_Unit_Level" Unit Level Testing((((4.6.3  HYPERLINK \l "_4.6.3_Integration_Testing" Integration Testing((((4.6.5  HYPERLINK \l "_4.6.4_System_Testing" System & Functional Testing(((4.6.6  HYPERLINK \l "_4.6.5__Software" Regression Testing(((4.6.7 HYPERLINK \l "_4.6.7_Software_Safety"Software Safety Testing(((7.1.4 HYPERLINK \l "_7.1.4_Who_Tests"OTS Analyses and Test((((5.5.1  HYPERLINK \l "_5.5.1_Test_Coverage" Test Coverage Analysis((((5.5.2  HYPERLINK \l "_5.5.2_Formal_Inspections" Formal Inspections of Test Plan and Procedures((((5.5.3  HYPERLINK \l "_5.5.3_Reliability_Modeling" Reliability Modeling((((5.5.4  HYPERLINK \l "_5.5.4_Checklists_of" Checklists of Tests(((((5.5.5  HYPERLINK \l "_5.5.5_Test_Results" Test Results Analysis(((5.5.6  HYPERLINK \l "_5.5.6_Independent_Verification" Independent Verification and Validation(((( Table 3-12 Dynamic Testing (Unit or Integration Level) TechniqueSafety Effort LevelMINMODFULLTypical sets of sensor inputs((((Test specific functions((((Volumetric and statistical tests(((((Test extreme values of inputs(((Test all modes of each sensor(((Every statement executed once((((Every branch tested at least once((((Every predicate term 
tested((((Every loop executed 0, 1, many, max-1, max, max+1 times(((Every path executed((((Every assignment to memory tested(((((Every reference to memory tested(((((All mappings from inputs checked(((((All timing constraints verified(((Test worst case interrupt sequences(((Test significant chains of interrupts(((Test Positioning of data in I/O space((((Check accuracy of arithmetic((((All modules executed at least once(((All invocations of modules tested(((( Table 3-13 Software System Testing TechniqueSafety Effort LevelMINMODFULLSimulation (Test Environment)((((Load Testing((((Stress Testing((((Boundary Value Tests((((Test Coverage Analysis((((Functional Testing(((Performance Monitoring((((Disaster Testing(((((Resistance to Failure Testing((((Red Team Testing((((Formal Progress Reviews((( HYPERLINK \l "_5.5.3_Reliability_Modeling" Reliability Modeling((((( HYPERLINK \l "_5.5.4_Checklists_of" Checklists of Tests((((( 4. SAFETY CRITICAL SOFTWARE DEVELOPMENT The following requirements and guidelines support the cardinal safety rule and its corollary that no single event or action shall be allowed to initiate a potentially hazardous event, and that the system, upon detection of an unsafe condition or command, shall inhibit the potentially hazardous event sequence and originate procedures/functions to bring the system to a predetermined safe state. The purpose of this section is to describe the software safety activities, which should be incorporated into the software development phases of project development. The software safety information, which should be included in the documents produced during these phases, is also discussed. If NASA standards or guidelines exist which define the format and/or content of a specific document, they are referenced and should be followed. 
The term "software components" is used in a general sense to represent important software development products such as software requirements, software designs, software code or program sets, software tests, etc. 4.1 Software Concept and Initiation Phase For most NASA projects this life-cycle phase involves system level requirements and design development. Although most project work during this phase is concentrated on the subsystem level, software development has several tasks that must be initiated. These include the creation of important software documents and plans which will determine how, what, and when important software products will be produced or activities will be conducted. Each of the following documents should address software safety issues: Table 4-1 Software Safety Documentation DocumentSoftware Safety SectionSystem Safety PlanInclude software as a subsystem, identify tasks.Software Concepts DocumentIdentify safety critical processes.Software Management Plan and Software Configuration Management PlanCoordination with systems safety tasks, flow-down incorporation of safety requirements. Applicability to safety critical software.Software Security PlanSecurity of safety critical software.Software Quality Assurance PlanSupport to software safety, verification of software safety requirements, safety participation in software reviews and inspections. 4.2 Software Requirements Phase The cost of correcting software faults and errors escalates dramatically as the development life cycle progresses, making it important to correct errors and implement correct software requirements from the very beginning. Unfortunately, it is generally impossible to eliminate all errors. Software developers must therefore work toward two goals: To develop complete and correct requirements and correct code To develop fault-tolerant designs, which will detect and compensate for software faults on the fly. NOTE: (2) is required because (1) is usually impossible. 
This section of the guidebook describes safety involvement in developing safety requirements for software. The software safety requirements can be top-down (flowed down from system requirements) and/or bottom-up (derived from hazards analyses). In some organizations, top-down flow is the only permitted route for requirements into software, and in those cases, newly derived bottom-up safety requirements must be flowed back into the system specification. The requirements of software components are typically expressed as functions with corresponding inputs, processes, and outputs, plus additional requirements on interfaces, limits, ranges, precision, accuracy, and performance. There may also be requirements on the data of the program set, its attributes, relationships, and persistence, among others. Software safety requirements are derived from the system and subsystem safety requirements developed to mitigate hazards identified in the Preliminary, System, and Subsystems Hazard Analyses (see  HYPERLINK \l "_2.3_Preliminary_Hazard" Section 2.4 PHA). Also, system safety flows requirements to systems engineering. The systems engineering group and the software development group have a responsibility to coordinate and negotiate requirements flow-down to be consistent with the software safety requirements flow-down. The software safety organization should flow requirements into the following documents: Software Requirements Document (SRD) Software Interface Specification (SIS) or Interface Control Document (ICD) Safety-related requirements must be clearly identified in the SRD. SIS activities identify, define, and document interface requirements internal to the [sub]system in which software resides, and between system (including hardware and operator interfaces), subsystem, and program set components and operation procedures. 
Note that the SIS is sometimes effectively contained in the SRD, or within an Interface Control Document (ICD) which defines all system interfaces, including hardware to hardware, hardware to software, and software to software. 4.2.1 Development of Software Safety Requirements Software safety requirements are obtained from several sources, and are of two types: generic and specific. The generic category of software safety requirements is derived from sets of requirements that can be used in different programs and environments to solve common software safety problems. Examples of generic software safety requirements and their sources are given in  HYPERLINK \l "_4.2.2__Generic" Section 4.2.2 Generic Software Safety Requirements. Specific software safety requirements are system unique functional capabilities or constraints that are identified in three ways: Method 1Through top down analysis of system design requirements and specifications:The system requirements may identify system hazards up-front, and specify which system functions are safety critical or a Fault Tree Analysis may be completed to identify safety critical functions. The software safety organization participates or leads the mapping of these requirements to software.Method 2From the Preliminary Hazard Analysis (PHA):PHA looks down into the system from the point of view of system hazards. Preliminary hazard causes are mapped to, or interact with, software. Software hazard control features are identified and specified as requirements.Method 3Through bottom up analysis of design data, (e.g. flow diagrams, Failure Mode Effects and Criticality Analysis (FMECA) etc.)Design implementations allowed but not anticipated by the system requirements are analyzed and new hazard causes are identified. Software hazard controls are specified via requirements when the hazard causes are linked to or interact with software. 
4.2.1.1 Safety Requirements Flow-down Generic safety requirements are established a priori and placed into the system specification and/or overall project design specifications. From there they are flowed into lower level unit and module specifications. Other safety requirements, derived from bottom-up analysis, are flowed up from subsystems and components to the system level requirements. These new system level requirements are then flowed down across all affected subsystems. During the System Requirements Phase, subsystems and components may not be well defined. In this case, bottom-up analysis might not be possible until the Architectural Design Phase or even later.  HYPERLINK \l "_5.1.1_Software_Safety" Section 5.1.1 Software Safety Requirements Flow-down Analysis, verifies that the safety requirements have been properly flowed into the specifications. Benefit-to-Cost Rating: HIGH 4.2.2 Generic Software Safety Requirements Similar processors/platforms and/or software can suffer from similar or identical design problems. Generic software safety requirements are derived from sets of requirements and best practices used in different programs and environments to solve common software safety problems. Generic software safety requirements capture these lessons learned and provide a valuable resource for developers. Generic requirements prevent costly duplication of effort by taking advantage of existing proven techniques and lessons learned rather than reinventing techniques or repeating mistakes. Most development programs should be able to make use of some generic requirement; however, they should be used with care and may have to be tailored from project to project. As technology evolves, or as new applications are implemented, new "generic" requirements will likely arise, and other sources of generic requirements might become available. 
A partial listing of sources for generic requirements is shown below: NSTS 19943 Command Requirements and Guidelines for NSTS Customers STANAG 4404 (Draft) NATO Standardization Agreement (STANAG) Safety Design Requirements and Guidelines for Munition Related Safety Critical Computing Systems WSMCR 127-1 Range Safety Requirements - Western Space and Missile Center, Attachment-3 Software System Design Requirements. This document is being replaced by EWRR (Eastern and Western Range Regulation) 127-1, Section 3.16.4 Safety Critical Computing System Software Design Requirements. AFISC SSH 1-1 System Safety Handbook - Software System Safety, Headquarters Air Force Inspection and Safety Center. EIA Bulletin SEB6-A System Safety Engineering in Software Development (Electrical Industries Association) Underwriters Laboratory - UL 1998 Standard for Safety - Safety-Related Software, January 4th, 1994 NUREG/CR-6263 MTR 94W0000114 High Integrity Software for Nuclear Power Plants, The MITRE Corporation, for the U.S. Nuclear Regulatory Commission.  HYPERLINK \l "_E.2_Generic_Software" Appendix E has a listing of Marshall Space Flight Center (MSFC) identified generic software safety requirements. They are provided in a checklist format. Benefit-to-Cost Rating: HIGH 4.2.2.1 Fault and Failure Tolerance/Independence Most NASA space systems employ failure tolerance (as opposed to fault tolerance) to achieve an acceptable degree of safety. This is primarily achieved via hardware, but software is also important, because improper software design can defeat the hardware failure tolerance and vice versa. Not all faults lead to a failure; however, every failure results from one or more faults. A fault is an error that does not affect the functionality of the system, such as bad data from either input, calculations, or output, an unknown command, or a command or data coming at an unknown time. 
If properly designed, the software, or system, can respond to glitches by detecting these errors and correcting them, intelligently. This would include checking input and output data by possibly doing limit checking and setting the value to a known safe value, or requesting and/or waiting for the next data point. For I/O, have CRC checks and handshaking so that garbled or unrecognized messages could be detected and either dropped or request retransmission of the message. Occasional bad I/O, data or commands should not be considered failures, unless there are too many of them and the system can not handle them. One or more intelligent fault collection routines should be part of the program to track, and possibly log, the number and type of errors. These collection routines would then either handle the caution and warning and/or recovery for the software system, or each collection routine could raise a flag to a higher level of control when the number of faults over time or the combination of fault types is programmed to determine that a system failure is imminent. With faults, the system should continue to operate normally. A failure tolerant design detects a failure and puts the software and/or system into a changed operating state, either by switching to backup software or hardware (i.e. s/w routine, program, CPU, secondary sensor input or valve cut-off, etc.) or by reducing the functionality of the system but continuing to operate. The question arises whether a system is to be built fault or failure tolerant or both. If the system, including software, is built to handle most probable, and some less probable but hazardous faults, it may be able to preclude many possible failure scenarios. Taking care of problems while they are still faults can help prevent the software, or the system, from going into failure. The complaint with building in fault tolerance is that it requires multiple checks and monitoring at very low levels. 
If major failures can be detected, isolated, stopped or recovered from, it is presumed that this would require less work and be simpler than fault tolerance. For safety critical systems, it is best to design in both fault and failure tolerance. The fault tolerance keeps most of the minor errors from propagating into failures. Failures must still be detected and dealt with, whether as a result of fault collection/monitoring routines or by direct failure detection routines and/or hardware. In this guidebook, both fault and failure tolerance are discussed. The proper blending of both to meet the requirements of your particular system must be determined by the software designers and the safety engineers. Must Work Functions (MWFs) MWFs achieve failure tolerance through independent parallel redundancy. For parallel redundancy to be truly independent there must be dissimilar software in each parallel path. Software can sometimes be considered dissimilar if N-Version programming is properly applied, see Section 4.3 Architectural Design Phase. Must Not Work Functions (MNWFs) MNWFs achieve failure tolerance through independent multiple series inhibits. For series inhibits to be considered independent they must be generally controlled by different processors containing dissimilar software. In both cases, software must be specified to preserve the hardware failure tolerance via proper allocation amongst hardware units. Fault/Failure Detection, Isolation and Recovery (FDIR) FDIR is a problematic design area, where improper design can result in system false alarms, bogus system failures, or failure to detect important safety critical system failures. FDIR for the NASA Space Station has included two approaches: Shadowing: Fail-safe active hazard detection and safing can be used where a higher tier processor can monitor a lower tier processor and shut down the lower processor in the event that pre-defined allowable conditions are violated. 
Higher tier processors can emulate the application running in the lower tier, and compare predicted (expected) parameter values to actual measured values. This technique is sometimes called shadowing or convergence testing. Built-in Test (BIT): Sometimes FDIR can be based on self-test (BIT) of lower tier processors where lower level units test themselves, and report their good/bad status to a higher processor. The higher processor switches out units reporting a failed or bad status. If too many faults or very serious failures occur, it may be necessary for the system to shut itself down in an orderly, safe manner. (For example, in the event of a power outage the system might continue for a short period of time on limited battery power during which period the software should commence an orderly shutdown to a safe system state). Software responses to off-nominal scenarios should address safety considerations, and be appropriate to the situation. Complete system shutdown may not be appropriate in many cases. How to achieve independence and other failure tolerance development methods are discussed more in  HYPERLINK \l "_4.3_Architectural_Design" Section 4.3 Architectural Design Phase. Benefit-to-Cost Rating: MEDIUM 4.2.2.2 Hazardous Commands  HYPERLINK \l "_Glossary_of_Terms" Appendix-A Glossary of Terms defines a hazardous command. Commands can be internal to a software set (e.g., from one module to another) or external, crossing an interface to/from hardware or a human operator. Longer command paths increase the probability of an undesired or incorrect command response due to noise on the communications channel, link outages, equipment malfunctions, or (especially) human error. Reference [26] NSTS 1700.7B section 218 defines hazardous command as "...those that can remove an inhibit to a hazardous function, or activate an unpowered payload system." 
It continues to say Failure modes associated with payload flight and ground operations including hardware, software, and procedures used in commanding from payload operations control centers (POCCs) and other ground equipment must be considered in the safety assessment to determine compliance with the (failure tolerance) requirements. NSTS 19943 treats the subject of hazardous commanding and presents the guidelines by which it will be assessed. NSTS 1700.7B section 218 focuses on remote commanding of hazardous functions, but the principles can and should be generally applied. Both NSTS 19943 and EWRR 127-1 (Paragraph 3.16.7 b) recommend and require respectively, two-step commanding. EWRR 127-1 states Two or more unique operator actions shall be required to initiate any potentially hazardous function or sequence of functions. The actions shall be designed to minimize the potential for inadvertent actuation. Note that two-step commanding is in addition to any hardware (or software) failure tolerance requirements, and is neither necessary nor sufficient to meet failure tolerance requirements. A two-step command does not constitute an inhibit. (See Glossary Appendix A for definition of inhibit.) Software interlocks or preconditions can be used to disable certain commands during particular mission phases or operational modes. However, provision should be made to provide access to (i.e. enable) all commands in the event of unexpected emergency situations. Emergency command access is generally required by flight crews. For example, when Apollo 13 experienced major problems, the nominal Lunar Module power up sequence timeline could not be completed before the Command Module battery power expired. A different (shorter) sequence was improvised. 
Benefit-to-Cost Rating: HIGH 4.2.2.3 Timing, Sizing and Throughput Considerations System design should properly consider real-world parameters and constraints, including human operator and control system response times, and flow these down to software. Adequate margins of capacity should be provided for all these critical resources. This section provides guidance for developers in specifying software requirements to meet the safety objectives. Subsequent analysis of software for Timing, Throughput and Sizing considerations is discussed in  HYPERLINK \l "_5.1.5_Timing,_Throughput" Section 5.1.5 Timing, Sizing and Throughput Analysis. Time to Criticality Safety critical systems sometimes have a characteristic time to criticality, which is the time interval between a fault occurring and the system reaching an unsafe state. This interval represents a time window in which automatic or manual recovery and/or safing actions can be performed, either by software, hardware, or by a human operator. The design of safing/recovery actions should fully consider the real-world conditions and the corresponding time to criticality. Automatic safing can only be a valid hazard control if there is ample margin between worst case (long) response time and worst case (short) time to criticality. Automatic safing Automatic safing is often required if the time to criticality is shorter than the realistic human operator response time, or if there is no human in the loop. This can be performed by either hardware or software or a combination depending on the best system design to achieve safing. Control system design Control system design can define timing requirements. The design is based on the established body of classical and modern dynamic control theory, such as dynamic control system design, and multivariable design in the s-domain (Laplace transforms) for analog continuous processes. Systems engineers are responsible for overall control system design. 
Computerized control systems use sampled data (versus continuous data). Sampled analog processes should make use of Z-transforms to develop difference equations to implement the control laws. This will also make most efficient use of real-time computing resources.[1] Sampling rates Sampling rates should be selected with consideration for noise levels and expected variations of control system and physical parameters. For measuring signals that are not critical, the sample rate should be at least twice the maximum expected signal frequency to avoid aliasing. For critical signals, and parameters used for closed loop control, it is generally accepted that the sampling rate must be much higher. A factor of at least ten above the system characteristic frequency is customary. [1] Dynamic memory allocation Dynamic memory allocation requires that several varieties of resources be available and adequate. The amount of actual memory (RAM) available, whether virtual memory (disk space) is used, how much memory the software (programs and operating system) uses statically, and how much is dynamically allocated are all factors in whether a dynamic allocation will fail or succeed. Several factors may not be known in detail, and worst-case values should be used. How the software deals with failed dynamic allocation should be considered. What a particular language or compiler does in such a situation should be thoroughly researched. If an exception or signal is generated, it should be handled by the application software. Allowing a default similar to the MS-DOS "abort, retry, fail" prompt is a very bad idea for safety critical software. Critical memory blocks must be identified and protected from inadvertent corruption or deletion. Since the dynamic allocation mechanism is usually unknown (defined by the compiler vendor), methods must be used to detect or prevent the allocation of any of the critical areas. Processors with Memory Management Units (MMU) provide one mechanism. 
Checking the address range returned by the dynamic allocation routine against the critical memory addresses will work in systems that use physical (RAM) addresses or logical memory addresses. Care must be taken that logical and physical addresses are not compared to each other. CRC values or error-correcting codes are software ways to detect and/or correct critical data that may be accidentally overwritten. Memory Checking Testing of random access memory (RAM) can be a part of BIT/self-test and is usually done on power up of a system to verify that all memory addresses are available, and that the RAM is functioning properly. Memory tests may be done periodically to check for problems that may occur, due to a single event upset or other hardware (RAM) problems. Memory utilization checks may be used to give advance warning of imminent saturation of memory. Quantization Digitized systems should select word lengths long enough to reduce the effects of quantization noise to ensure stability of the system [10]. Selection of word lengths and floating point coefficients should be appropriate with regard to the parameters being processed in the context of the overall control system. Too short word lengths can result in system instability and misleading readouts. Too long word lengths result in excessively complex software and heavy demand on CPU resources, scheduling and timing conflicts etc. Computational Delay Computers take a finite time to read data and to calculate and output results, so some control parameters will always be out of date. Controls systems must accommodate this. Also, check timing clock reference datum, synchronization and accuracy (jitter). Analyze task scheduling (e.g., with Rate Monotonic Analysis (RMA)). 
Benefit-to-Cost Rating: HIGH 4.2.3 Formal Methods - Specification Development Reference [25] states: Formal Methods (FM) consists of a set of techniques and tools based on mathematical modeling and formal logic that are used to specify and verify requirements and designs for computer systems and software. Formal Methods have not gained a wide acceptance among all industries, mostly due to the difficulty of the formal proofs. A considerable learning curve must be surmounted for newcomers, which can be expensive. Once this hurdle is surmounted successfully, some users find that it can reduce overall development life-cycle cost by eliminating many costly defects prior to coding. In addition, many tools are now available to aid in using Formal Methods. Software and system requirements are usually written in human-readable language. This can lead to ambiguity, when a statement that is clear to one person is interpreted differently by another. To avoid this ambiguity, requirements can be written in a formal, mathematical language. This is the first step in applying Formal Methods. In the production of safety-critical systems or systems that require high assurance, Formal Methods provide a methodology that gives the highest degree of assurance for a trustworthy software system. Formal Methods have been used with success on both military and commercial systems that were considered safety-critical applications. The benefits from the application of the methodology accrue to both safety and non-safety areas. Formal Methods do not guarantee a precise quantifiable level of reliability; at present they are only acknowledged as producing systems that provide a high level of assurance. On a qualitative level, the following list identifies different levels of application of assurance methods in software development [31]. They are ranked by the perceived level of assurance achieved with the lowest numbered approaches representing the highest level of assurance. 
Each of the approaches to software development is briefly explained by focusing on that part of the development that distinguishes it from the other methods. Formal development down to object code Formal development down to object code requires that formal mathematical proofs be carried out on the executable code. Formal development down to source code Formal development down to source code requires that the formal specification of the system undergo proofs of properties of the system. Rigorous development down to source code Rigorous development down to source code is when requirements are written in a formal specification language and emulators of the requirements are written. The emulators serve the purpose of a prototype to test the code for correctness of functional behavior. Structured development to requirements Structured development to requirements analysis then rigorous development down to source code performs all of the steps from the previous paragraph. The source code undergoes a verification process that resembles a proof but falls short of one. Structured development down to source code Structured development down to source code is the application of the structured analysis/structured design method proposed by DeMarco [32]. It consists of a conceptual diagram that graphically illustrates functions, data structures, inputs, outputs, and mass storage and their interrelationships. Code is written based on the information in the diagram. Ad hoc Ad hoc techniques encompass all of the non-structured and informal techniques (i.e. hacking, code a little then test a little). The methodology described in this guidebook is that of level 3, rigorous development down to source code. Detailed descriptions of Formal methods are given in the NASA Formal Methods Guidebook [25]. In addition, the following publications are recommended reading as primers in Formal Methods: Rushby [29], Miller, et al [30], and Butler, et al [31]. 
Anthony Hall [39] gives Seven Myths of Formal Methods, and discusses using formal specification of requirements without formal proofs in a real-world development environment. Richard Kemmerer [40] shows how to integrate formal methods with the development process. Benefit-to-Cost Rating: MEDIUM The following descriptions of Formal Methods are taken from the NASA Langley FM Group internet World Wide Web home page: 4.2.3.1 Why Is Formal Methods Necessary? A digital system may fail as a result of either physical component failure, or design errors. The validation of an ultra-reliable system must deal with both of these potential sources of error. Well known techniques exist for handling physical component failure; these techniques use redundancy and voting. The reliability assessment problem in the presence of physical faults is based upon Markov modeling techniques and is well understood. The design error problem is a much greater threat. Unfortunately, no scientifically justifiable defense against this threat is currently used in practice. There are 3 basic strategies that are advocated for dealing with the design error: Testing (lots of it) Design Diversity (i.e. software fault-tolerance: N-version programming, recovery blocks, etc.) Fault/Failure Avoidance (i.e. formal specification/verification, automatic program synthesis, reusable modules) The problem with life testing is that in order to measure ultra reliability one must test for exorbitant amounts of time. For example, to measure a 10-9 probability of failure for a 1 hour mission one must test for more than 114,000 years. Many advocate design diversity as a means to overcome the limitations of testing. The basic idea is to use separate design/implementation teams to produce multiple versions from the same specification. Then, non-exact threshold voters are used to mask the effect of a design error in one of the versions. The hope is that the design flaws will manifest errors independently or nearly so. 
By assuming independence one can obtain ultra-reliable-level estimates of reliability even though the individual versions have failure rates on the order of 10^-4. Unfortunately, the independence assumption has been rejected at the 99% confidence level in several experiments for low reliability software. Furthermore, the independence assumption cannot ever be validated for high reliability software because of the exorbitant test times required. If one cannot assume independence then one must measure correlations. This is infeasible as well, since it requires as much testing time as life-testing the system because the correlations must be in the ultra-reliable region in order for the system to be ultra-reliable. Therefore, it is not possible, within feasible amounts of testing time, to establish that design diversity achieves ultra-reliability. Consequently, design diversity can create an illusion of ultra-reliability without actually providing it. It is felt that formal methods currently offers the only intellectually defensible method for handling the design fault problem. Because the often quoted 1 - 10^-9 reliability is well beyond the range of quantification, there is no choice but to develop life-critical systems in the most rigorous manner available to us, which is the use of formal methods. 4.2.3.2 What Is Formal Methods? Traditional engineering disciplines rely heavily on mathematical models and calculation to make judgments about designs. For example, aeronautical engineers make extensive use of computational fluid dynamics (CFD) to calculate and predict how particular airframe designs will behave in flight. We use the term formal methods to refer to the variety of mathematical modeling techniques that are applicable to computer system (software and hardware) design. That is, formal methods is the applied mathematics of computer systems engineering and, when properly applied, can serve a role in computer system design analogous to the role CFD serves in aeronautical design. 
Formal methods may be used to specify and model the behavior of a system and to mathematically verify that the system design and implementation satisfy system functional and safety properties. These specifications, models, and verifications may be done using a variety of techniques and with various degrees of rigor. The following is an imperfect, but useful, taxonomy of the degrees of rigor in formal methods: Level-1: Formal specification of all or part of the system. Level-2: Formal specification at two or more levels of abstraction and paper and pencil proofs that the detailed specification implies the more abstract specification. Level-3: Formal proofs checked by a mechanical theorem prover. Level 1 represents the use of mathematical logic or a specification language that has a formal semantics to specify the system. This can be done at several levels of abstraction. For example, one level might enumerate the required abstract properties of the system, while another level describes an implementation that is algorithmic in style. Level 2 formal methods goes beyond Level 1 by developing pencil-and-paper proofs that the more concrete levels logically imply the more abstract-property oriented levels. This is usually done in the manner illustrated below. Level 3 is the most rigorous application of formal methods. Here one uses a semi-automatic theorem prover to make sure that all of the proofs are valid. The Level 3 process of convincing a mechanical prover is really a process of developing an argument for an ultimate skeptic who must be shown every detail. Formal methods is not an all-or-nothing approach. The application of formal methods to only the most critical portions of a system is a pragmatic and useful strategy. Although a complete formal verification of a large complex system is impractical at this time, a great increase in confidence in the system can be obtained by the use of formal methods at key locations in the system. 
4.2.4 Model Checking Model checking is a form of Formal Methods that verifies finite-state systems. It is an automatic method, and tools exist to provide that automation (for instance: SPIN and SMV). Model checking can be applied to more than just software, and has been used to formally verify industrial systems. The technique is especially aimed at the verification of reactive, embedded systems, i.e. systems that are in constant interaction with the environment. Model checking can be applied relatively easily at any stage of the existing software process without causing major disruptions. It has been extended to work with at least some infinite-state systems and also with real-time systems. Model checking can verify simple properties like reachability (does a system ever reach a certain state) or lack-of-deadlock (is deadlock avoided in the system), or more complex properties like safety (nothing bad ever happens) or liveness (something good eventually happens). Benefit-to-Cost Rating: MEDIUM 4.2.4.1 How Model Checking Works The first step in model checking is to describe the system in a state-based, formal way. Each model checker uses its own language for system description. The second step is to express program flow using propositional temporal logic. This logic deals with transitions from one state to another (stepping through the program), and what may or may not be true in each state. For instance, you can express a formula (property) that is true in some future state (eventually) or in all future states (always). Once the system is modeled and the temporal logic is determined, algorithms are used to traverse the model defined by the system and check if the specification holds or not. Very large state-spaces can often be traversed in minutes. The technique has been applied to several complex industrial systems, ranging from hardware to communication protocols to safety critical plants and procedures. 
For more details, the book Model Checking [37] describes the technique in detail. The website  HYPERLINK "http://www.abo.fi/~johan.lilius/mc/mclinks.html" http://www.abo.fi/~johan.lilius/mc/mclinks.html contains references to current model checking research, people, tools, and projects. General information, as well as reviews of SMV and SPIN (the automated tools described below), can be found at:  HYPERLINK "http://www.math.hmc.edu/~jpl/modelcheckers.html" http://www.math.hmc.edu/~jpl/modelcheckers.html. 4.2.4.2 Tools Among the automated tools, the primary ones are SMV and SPIN. SMV is a symbolic model checker specialized on the verification of synchronous and asynchronous systems. SPIN is an on-the-fly model checker specialized on the verification of asynchronous systems. Spin ( HYPERLINK "http://netlib.bell-labs.com/netlib/spin/whatispin.html" http://netlib.bell-labs.com/netlib/spin/whatispin.html) is designed to test the specifications of concurrent (distributed) systems-- specifically communications protocols, though it applies to any concurrent system. It will find deadlocks, busy cycles, conditions that violate assertions, and race conditions. The software was developed at Bell Labs in the formal methods and verification group starting in 1980. Spin targets efficient software verification, not hardware verification. It uses a high level language to specify systems descriptions (PROMELA - PROcess MEta LAnguage). Spin has been used to trace logical design errors in distributed systems design, such as operating systems, data communications protocols, switching systems, concurrent algorithms, railway signaling protocols, etc. The tool checks the logical consistency of a specification. Spin also reports on deadlocks, unspecified receptions, flags incompleteness, race conditions, and unwarranted assumptions about the relative speeds of processes. It uses an on-the-fly approach where not all of the model must be in memory at once. 
SMV (Symbolic Model Verifier) ( HYPERLINK "http://www.cs.cmu.edu/~modelcheck/smv.html" http://www.cs.cmu.edu/~modelcheck/smv.html) comes from Carnegie Mellon University. The SMV system requires specifications to be written in the temporal logic CTL, and uses Kripke diagrams. The input language of SMV is designed to allow the description of finite state systems that range from completely synchronous to completely asynchronous, and from the detailed to the abstract. The language provides for modular hierarchical descriptions, and for the definition of reusable components. Since it is intended to describe finite state machines, the only data types in the language are finite ones - Booleans, scalars and fixed arrays. The logic CTL allows safety, liveness, fairness, and deadlock freedom to be specified syntactically. In addition, other academic systems include: HyTech ( HYPERLINK "http://www-cad.EECS.Berkeley.EDU/~tah/HyTech/" http://www-cad.EECS.Berkeley.EDU/~tah/HyTech/) Kronos ( HYPERLINK "http://www-verimag.imag.fr//TEMPORISE/kronos/index-english.html" http://www-verimag.imag.fr//TEMPORISE/kronos/index-english.html) MONA ( HYPERLINK "http://www.brics.dk/mona/" http://www.brics.dk/mona/) Murphi ( HYPERLINK "http://sprout.stanford.edu/dill/murphi.html" http://sprout.stanford.edu/dill/murphi.html) TREAT ( HYPERLINK "http://www.cis.upenn.edu/~lee/inhye/treat.html" http://www.cis.upenn.edu/~lee/inhye/treat.html) TVS ( HYPERLINK "http://tvs.twi.tudelft.nl/" http://tvs.twi.tudelft.nl/) STEP ( HYPERLINK "http://rodin.stanford.edu/" http://rodin.stanford.edu/) UPPAAL ( HYPERLINK "http://www.docs.uu.se/docs/rtmv/uppaal/index.html" http://www.docs.uu.se/docs/rtmv/uppaal/index.html) Verus ( HYPERLINK "http://www.cs.cmu.edu/~modelcheck/verus.html" http://www.cs.cmu.edu/~modelcheck/verus.html) Vis ( HYPERLINK "http://www-cad.eecs.berkeley.edu/~vis/" http://www-cad.eecs.berkeley.edu/~vis/) Commercial programs include: FormalCheck ( HYPERLINK 
"http://www.cadence.com/eda_solutions/flv_fveimc_l3_index.html" http://www.cadence.com/eda_solutions/flv_fveimc_l3_index.html) Time Rover ( HYPERLINK "http://www.time-rover.com/TRindex.html" http://www.time-rover.com/TRindex.html) Rational Rose add-in ( HYPERLINK "http://www.rationalrose.com/modelchecker/" http://www.rationalrose.com/modelchecker/) 4.2.4.3 Challenges The main challenge in model checking is the state explosion problem -- the fact that the number of states in the model is frequently so large that model checkers exceed the available memory and/or the available time. Several techniques are used to cope with this problem. One type of technique is to build only a part of the state-space of the program, while still maintaining the ability to check the properties of interest. These are partial-order techniques (interleaving) and abstraction techniques (simpler system). The symbolic approach is another way to overcome the problem. The idea is to implicitly represent the states and transitions of the system, rather than explicitly. Binary Decision Diagrams (BDDs) is an efficient encoding of Boolean formulas, and is the usual implicit representation. The BDD is used with the temporal formulas for the model checking. Therefore, the size of the BDD representation is the limiting factor and not the size of the explicit state representation. On-the-fly techniques analyze portions of the model as it goes along, so that not all of it must be in memory at any one time. 4.2.5 Formal Inspections of Specifications Formal Inspections* are structured technical reviews of a product of the software development life cycle, conducted for the purpose of finding and eliminating defects. The product can be any documentation, including requirements, design notes, test plans, or the actual source code. Formal Inspections differ from informal reviews or walkthroughs in that there are specified steps to be taken, and roles are assigned to individual reviewers. 
Formal inspections are not formal methods! Formal inspections are a structured way to find defects in some software product, from a requirements document to the actual code. Formal methods are a mathematical way of specifying and verifying a software system. The two methods can be used together or separately. NASA has published a standard and guidebook for implementing the Formal Inspection (FI) process, Software Formal Inspections Standard (NASA-STD-2202-93) [36] and Software Formal Inspections Guidebook (NASA-GB-A302) [25]. FIs should be performed within every major step of the software development process, including requirements specification, design, coding, and testing. Formal Inspections have the most impact when applied early in the life of a project, especially the requirements specification and definition stages of a project. Impact means that the bugs are found earlier, when its cheaper to fix them. Studies have shown that the majority of all faults/failures, including those that impinge on safety, come from missing or misunderstood requirements. Formal Inspection greatly improves the communication within a project and enhances understanding of the system while scrubbing out many of the major errors/defects. For the FI of software requirements, the inspection team should include representatives from Systems Engineering, Operations, Software Design and Code, Software Product Assurance, Safety, and any other system function that software will control or monitor. It is very important that software safety be involved in the FIs. Each individual may review the requirements from a generic viewpoint, or they may be assigned a specific point of view (tester, programmer/designer, user, safety) from which to review the document. It is also very helpful to have inspection checklists for each phase of development that reflect both generic and project specific criteria. The requirements discussed in this section and in Robyn R. 
Lutz's paper "Targeting Safety-Related Errors During Software Requirements Analysis" [42] will greatly aid in establishing this checklist. Also, the checklists provided in the NASA Software Formal Inspections Guidebook are helpful. The method of reporting findings from FIs is described in references [25] and [36]. In addition to those formats, the software safety engineer might also find it useful to record safety related findings in the format shown in  HYPERLINK \l "table42" Table 4-2. Benefit-to-Cost Rating: HIGH * Formal inspections are also known as Fagan Inspections, named after Michael Fagan of IBM who devised the method. Table 4-2 Subsystem Criticality Analysis Report Form Document Number: CL-SPEC- 2001 Document Title: Software Requirements Specification - Cosmolab ProgramParagraph Number / TitleRequirements(s) text excerptProblem /Hazard DescriptionRecommendationsHazard Report Reference Number3.3 Limit CheckingParameters listed in Table 3.3 shall be subjected to limit checking at a rate of 1 Hz.Table only gives one set of limits for each parameter, but expected values for parameters will change from mode to mode.During certain modes, false alarms would result because proper parameter values will exceed preset limit check values. Implement table driven limit values which can be changed during transitions from mode to mode.CL-1;9 4.2.6 Test Planning At the end of the specification phase, the test plan can be written. System tests can be defined that verify the functional aspects of the software under nominal conditions, as well as performance, load, stress, and other tests that verify acceptable behavior in non-standard situations. Safety tests of the system should be designed at this time. The tests should demonstrate how the software and system meets the safety requirements in the SRD. Specify pass/fail criteria for each test. Specify any special procedures, constraints, and dependencies for implementing and running safety tests. 
Describe the review and reporting process for safety-critical components. List test results, problems, and liens. 4.3 Architectural Design Phase The design of a program set represents the static and dynamic characteristics of the software that will meet the requirements specified in the governing SRD. Projects developing large amounts of software may elect to separate design development into separate phases, preliminary (architectural) and detailed/critical. Those with relatively small software packages may combine them into one phase. 4.3.1 Safety Objectives of Architectural Design The main safety objective of the architectural design phase is to define the strategy for achieving the required level of failure tolerance in the different parts of the system. The degree of failure tolerance required can be inversely related to the degree of fault reduction used, (e.g. Formal Methods). However, even the most rigorous level of fault reduction will not prevent all faults, and some degree of failure tolerance is generally required. Ideas, techniques, and approaches to be used during architectural design are as follows: Modularity During this phase the software is usually partitioned into modules, and the number of safety critical modules should be minimized. Interfaces between critical modules should also be designed for minimum interaction (low coupling). Traceability Requirements previously developed must be flowed down into the architecture, and be traceable. Independence/Failure Tolerance Despite the difficulty of proving software independence, some NASA programs continue to use this approach. As discussed earlier in Section 4.2.2.2 Fault and Failure Tolerance/Independence, two types of independence are often required; first to prevent fault propagation and second to achieve failure tolerance. To achieve failure tolerance for safety critical MWFs and MNWFs, certain Safety Critical Computer Software Components (SCCSCs) must be independent of each other. 
This usually means operating on different hardware hosts. MNWF: For two in-series inhibits to be independent, no single failure, human mistake, event or environment may activate both inhibits. For three series inhibits to be independent, no two failures, human mistakes, events or environments (or any combination of two single items) may activate all three inhibits. Generally this means that each inhibit must be controlled by a different processor with different software (N-version programming, see below). MWF For two parallel strings to be independent, no single failure may disable both strings. For three parallel strings, no two failures may disable all three strings. In software, N-version programming is preferred in safety critical parallel strings, (i.e., each string is implemented using uniquely developed code). Depending on the design, architectural techniques may include: Convergence testing Majority voting Fault containment regions Redundant Architecture N-Version programming Recovery blocks Resourcefulness Abbott-Neuman Components Self-Checks. For non-discrete continuously varying parameters which are safety critical, a useful redundancy technique is convergence testing or shadowing. A higher level process emulates lower level process(es) to predict expected performances and decide if failures have occurred in the lower level processes. The higher level process implements appropriate redundancy switching when it detects a discrepancy. Alternatively the higher level process can switch to a subset or degraded functional set to perform minimal functions when insufficient redundancy remains to keep the system fully operational. Some redundancy schemes are based on majority voting. This technique is especially useful when the criteria for diagnosing failures are complicated. (e.g. when an unsafe condition is defined by exceeding an analog value rather than simply a binary value). 
Majority voting requires more redundancy to achieve a given level of failure tolerance, as follows: 2 of 3 achieves single failure tolerance; 3 of 5 achieves two failure tolerance. An odd number of parallel units is required to achieve majority voting. Fault Propagation is a cascading of a software (or hardware or human) error from one module to another. To prevent fault propagation within software, SCCSCs must be fully independent of non-safety critical components and be able to either detect an error within itself and not allow it to be passed on or the receiving module must be able to catch and contain the error. 4.3.1.1 Fault Containment Regions One approach is to establish Fault Containment Regions (FCRs) to prevent propagation of software faults. This attempts to prevent fault propagation such as from non-critical software to SCCSCs; from one redundant software unit to another, or from one SCCSC to another. Techniques such as firewalling or partitioning should be used to provide sufficient isolation of FCRs to prevent hazardous fault propagation. FCRs are best partitioned or firewalled by hardware. Leveson (Section 3 Reference [2] ) states that logical firewalls can be used to isolate software modules, such as isolating an application from an operating system. To some extent this can be done using defensive programming techniques and internal software redundancy. (e.g., using authorization codes, or cryptographic keys). However, within NASA this is normally regarded as hazard mitigation, but not hazard control, because such software/logical safeguards can be defeated by hardware failures or EMI/Radiation effects. A typical method of obtaining independence between FCRs is to host them on different/ independent hardware processors. Sometimes it is acceptable to have independent FCRs hosted on the same processor depending on the specific hardware configuration. 
For example, if the FCRs are stored in separate memory chips and they are not simultaneously or concurrently multitasked in the same Central Processing Unit (CPU) at the same time. Methods of achieving independence are discussed in more detail in Reference [1], "The Computer Control of Hazardous Payloads", NASA/JSC/FDSD, 24 July 1991. FCRs are defined in reference [2], SSP 50038 Computer Based Control System Safety Requirements - International Space Station Alpha. 4.3.1.2 N-Version Programming N-Version Programming is one method that can be used to implement fault tolerant behavior. Multiple, independent versions of the software execute simultaneously. If the answers all agree, then the process continues. If there is disagreement, then a voting method is used to determine which of the answers is correct. In the past, some NASA policy documents have essentially stipulated the use of N-Version programming in any attempt to achieve failure tolerance. Reference [2] discusses in more detail the JSC position on N-Version programming. They recognize that the technique has limitations. Many professionals regard N-Version programming as ineffective, or even counter productive. Efforts to implement N-Version programming should be carefully planned and managed to ensure that valid independence is achieved. In practice applications of N-Version programming on NSTS payloads are limited to small simple functions. However, the NSTS power up of the engines has N-Version programming as well. Note that, by the NSTS 1700.7B stipulation, two processors running the same operating system are neither independent nor failure-tolerant of each other, regardless of the degree of N-Version programming used in writing the applications. In recent years, increasing controversy has surrounded the use of N-Version programming. In particular, Knight and Leveson [13] have jointly reported results of experiments with N-Version programming, claiming the technique is largely ineffective. 
Within NASA, Butler and Finelli [28] have also questioned the validity of N-version programming, even calling it counter productive. Though it has worked very effectively on some occasions, it should be evaluated carefully before being implemented. One major problem with N-version programming is that it increases complexity, which has a direct relationship with the number of errors. In one NASA study of an experimental aircraft, all of the software problems found during testing were the result of the errors in the redundancy management system. The control software operated flawlessly! Another difficulty with n-version programming is that achieving true independence is very difficult. Even if separate teams develop the software, studies have shown that the software is still often not truly independent. References [2] and [11] give some useful background for N-Version programming, and also for: Recovery blocks Resourcefulness Abbott-Neuman Components Self-Checks 4.3.1.3 Redundant Architecture Redundant architecture refers to having two versions of the operational code. Unlike n-version programming, the two versions do not need to operate identically. The primary software is the high-performance version. This is the regular software you want to run; it meets all the required functionality and performance requirements. However, if problems should develop in the high-performance software, particularly problems or failures that impact safety, then a high-assurance kernel (also called a safety kernel) is given control. The high-assurance kernel may have the same functionality as the high-performance software, or may have a more limited scope. The primary aspect is that it is safe. The high-assurance kernel will almost certainly be less optimized (slower, stressed more easily, lower limits on the load it can handle, etc.). The Carnegie Mellon Software Engineering Institute (SEI) Simplex Architecture (Section 7 reference [9]) is an example of a redundant architecture. 
This architecture includes the high-performance/high-assurance kernels, address-space protection mechanisms, real-time scheduling algorithms, and methods for dynamic communication among modules. This process requires using analytic redundancy to separate major functions into high-assurance kernels and high-performance subsystems. 4.3.2 Structured Design Techniques It is generally agreed that structured design techniques greatly reduce the number of errors, especially requirements errors which are the most expensive to correct and may have the most impact on the overall safety of a system. These Structured Analysis and Design methods for software have been evolving over the years, each with its approach to modeling the needed world view into software. The most recent analysis/design methods are Object Oriented Analysis & Object Oriented Design (OOA & OOD) and Formal Methods (FM). To date, the most popular analysis methods have been Functional Decomposition, Data Flow (or Structured Analysis), and Information Modeling. OOA actually incorporates some of the techniques of all of these within its method, at lower levels, once the system is cast into objects with attributes and services. In the following discussion, analysis is considered as a process for evaluating a problem space (a concept or proposed system) and rendering it into requirements that reflect the needs of the customer. Functional Decomposition has been, and still is, a popular method for representing a system. Functional Decomposition focuses on what functions, and sub-functions, the system needs to perform and the interfaces between those functions. The general complaints with this method are that 1) the functional capability is what most often changes during the design life-cycle and is thus very volatile, and 2) it is often hard to see the connection between the proposed system as a whole and the functions determined to create that system. 
Structured Analysis (DeMarco [34], Yourdon [33]) became popular in the 1980s and is still used by many. The analysis consists of interpreting the system concept (or real world) into data and control terminology, that is into data flow diagrams. The flow of data and control from bubble to data store to bubble can be very hard to track and the number of bubbles can get to be extremely large. One approach is to first define events from the outside world that require the system to react, then assign a bubble to that event, bubbles that need to interact are then connected until the system is defined. This can be rather overwhelming and so the bubbles are usually grouped into higher level bubbles. Data Dictionaries are needed to describe the data and command flows and a process specification is needed to capture the transaction/transformation information. The problems have been: 1) choosing bubbles appropriately, 2) partitioning those bubbles in a meaningful and mutually agreed upon manner, 3) the size of the documentation needed to understand the Data Flows, 4) still strongly functional in nature and thus subject to frequent change, 5) though data flow is emphasized, data modeling is not, so there is little understanding of just what the subject matter of the system is about, and 6) not only is it hard for the customer to follow how the concept is mapped into these data flows and bubbles, it has also been very hard for the designers who must shift the data flow diagram organization into a format that can be implemented. Information Modeling, using entity-relationship diagrams, is really a forerunner for OOA. The analysis first finds objects in the problem space, describes them with attributes, adds relationships, refines them into super and sub-types and then defines associative objects. Some normalization then generally occurs. 
Information modeling is thought to fall short of true OOA in that, according to Peter Coad & Edward Yourdon [37], 1) services, or processing requirements, for each object are not addressed, 2) inheritance is not specifically identified, 3) poor interface structures (messaging) exists between objects, and 4) classification and assembly of the structures are not used as the predominate method for determining the systems objects. This guidebook will present in more detail the two new most promising methods of structured analysis and design, Object-Oriented and Formal Methods (FM). OOA/OOD and FM can incorporate the best from each of the above methods and can be used effectively in conjunction with each other. Lutz and Ampo [27] described their successful experience of using OOD combined with Formal Methods as follows: For the target applications, object-oriented modeling offered several advantages as an initial step in developing formal specifications. This reduced the effort in producing an initial formal specification. We also found that the object-oriented models did not always represent the why, of the requirements, i.e., the underlying intent or strategy of the software. In contrast, the formal specification often clearly revealed the intent of the requirements. 4.3.2.1 Object Oriented Analysis and Design Object Oriented Design (OOD) is gaining increasing acceptance worldwide. OOD methods include those of Coad-Yourdon, Shlaer-Mellor, Rumbaugh, and Booch methods. These fall short of full Formal Methods because they generally do not include logic engines or theorem provers. But they are more widely used than Formal Methods, and a large infrastructure of tools and expertise is readily available to support practical OOD usage. OOA/OOD is the new paradigm and is viewed by many as the best solution to most problems. 
Some of the advantages of modeling the real world into objects is that 1) it is thought to follow a more natural human thinking process and 2) objects, if properly chosen, are the most stable perspective of the real world problem space and can be more resilient to change as the functions/services and data & commands/messages are isolated and hidden from the overall system. For example, while over the course of the development life-cycle the number, as well as types, of functions (e.g. turn camera 1 on, download sensor data, ignite starter, fire engine 3, etc.) may change, the basic objects (e.g. cameras, sensors, starter, engines, operator, etc.) needed to create a system usually are constant. That is, while there may now be three cameras instead of two, the new Camera-3 is just an instance of the basic object camera. Or while an infrared camera may now be the type needed, there is still a camera and the differences in power, warm-up time, data storage may change, all that is kept isolated (hidden) from affecting the rest of the system. OOA incorporates the principles of abstraction, information hiding, inheritance to the problem space, which are the three most human means of classification. These combined principles, if properly applied, establish a more modular, bounded, stable and understandable software system. These aspects of OOA should make a system created under this method more robust and less susceptible to changes, properties which help create a safer software system design. Abstraction refers to concentrating on only certain aspects of a complex problem, system, idea or situation in order to better comprehend that portion. The perspective of the analyst focuses on similar characteristics of the system objects that are most important to them. Then, at a later time, the analyst can address other objects and their desired attributes or examine the details of an object and deal with each in more depth. 
Data abstraction is used by OOA to create the primary organization for thinking and specification in that the objects are first selected from a certain perspective and then each object is defined in detail. An object is defined by the attributes it has and the functions it performs on those attributes. An abstraction can be viewed, as per Shaw [38], as a simplified description, or specification, of a system that emphasizes some of the systems details or properties while suppressing others. A good abstraction is one that emphasizes details that are significant to the reader or user and suppresses details that are, at least for the moment, immaterial or diversionary. Information hiding also helps manage complexity in that it allows encapsulation of requirements which might be subject to change. In addition, it helps to isolate the rest of the system from some object specific design decisions. Thus, the rest of the s/w system sees only what is absolutely necessary of the inner workings of any object. Inheritance defines a relationship among classes [objects], wherein one class shares the structure or behavior defined in one or more classes. Inheritance thus represents a hierarchy of abstractions, in which a subclass [object] inherits from one or more superclasses [ancestor objects]. Typically, a subclass augments or redefines the existing structure and behavior of its superclasses [39]. Classification theory states that humans normally organize their thinking by: looking at an object and comparing its attributes to those experienced before (e.g. looking at a cat, humans tend to think of its size, color, temperament, etc. in relation to past experience with cats) distinguishing between an entire object and its component parts (e.g. a rose bush versus its roots, flowers, leaves, thorns, stems, etc.) classification of objects as distinct and separate groups (e.g. 
trees, grass, cows, cats, politicians) In OOA, the first organization is to take the problem space and render it into objects and their attributes (abstraction). The second step of organization is into Assembly Structures, where an object and its parts are considered. The third form of organization of the problem space is into Classification Structures during which the problem space is examined for generalized and specialized instances of objects (inheritance). That is, if looking at a railway system the objects could be engines (provide power to pull cars), cars (provide storage for cargo), tracks (provide pathway for trains to follow/ride on), switches (provide direction changing), stations (places to exchange cargo), etc. Then you would look at the Assembly Structure of cars and determine what was important about their pieces parts, their wheels, floor construction, coupling mechanism, siding, etc. Finally, Classification Structure of cars could be into cattle, passenger, grain, refrigerated, and volatile liquid cars. The purpose of all this classification is to provide modularity which partitions the system into well defined boundaries that can be individually/independently understood, designed, and revised. However, despite classification theory, choosing what objects represent a system is not always that straight forward. In addition, each analyst or designer will have their own abstraction, or view of the system which must be resolved. Shlaer and Mellor [40], Jacobson [41], Booch [39], and Coad and Yourdon [37] each offer a different look at candidate object classes, as well as other aspects of OOA/OOD. These are all excellent sources for further introduction (or induction) into OOA and OOD. OO does provide a structured approach to software system design and can be very useful in helping to bring about a safer, more reliable system. 
While there is a growing number of OO gurus with years of practical experience, many OO projects are implemented by those with book-knowledge and little direct experience. It is important not to take everything written in the OOA/OOD books as the only correct way to do things. Adaptation of standard methods may be important in your environment. As an example, the team of software designers who worked on the Mars Pathfinder mission [38] decided to use Object Oriented Design, though their developers had only book-knowledge of the methodology. Attempting to follow the design methodologies verbatim led to an increasingly complex set of objects. The team eventually modified the design methodology by combining the bottom up approach they had been using with a more top down division into subsystems. 4.3.2.2 Unified Modeling Language (UML) The Unified Modeling Language (UML) is a methodology and language for specifying, visualizing, and documenting the development artifacts (design) of an object-oriented system. The UML represents the unification of the Booch, Objectory, and OMT methods and is their direct and upwardly compatible successor. It also incorporates ideas from many other methodologists, including Coad, Gamma, Mellor, Shlaer, and Yourdon. UML is a graphical modeling language, using a variety of diagrams and charts to show the structure and relationships of an object-oriented design. Class diagrams show the individual classes and how they relate to each other, such as being contained within another class. Each class box can contain some or all of the attributes (data) and operations (methods) of the class. Relationships among classes come from the following set: Associations between classes mean that they communicate via messages (calling each other's methods). Aggregations are a specialized association, where one class owns the other. Compositions show that one class is included within another class. 
Generalizations represent an inheritance relationship between the classes. Dependencies are similar to associations, but while one class depends on another, it doesnt contain a pointer or reference to the other class. Realizations are relationships where one modeling element is the implementation (realization) of another. The major features of UML include: Use cases and scenarios Object and class models State charts and other behavioral specifications Large-scale structuring Design patterns Extensibility mechanisms The types of diagrams used by UML include: Use-case diagrams Class diagrams State-machine diagrams Message-trace diagrams Object-message diagrams Process diagrams Module diagrams Platform diagrams UML is quickly becoming the standard OO modeling language. Tools already incorporate it, and some can even generate code directly from the UML diagrams. UML has been adapted for real-time systems. Many books now exist for learning UML, as well as on applying UML to specific environments or integrating it with other design methodologies. 4.3.3 Selection of COTS and Reuse During the architectural design phase, and even earlier, decisions are made to select Off-The-Shelf (OTS) items (software, hardware, or both) that are available as is from a commercial source (Commercial Off-The-Shelf (COTS)) or to reuse applications developed from other similar projects (i.e., Government Off-The-Shelf (GOTS) items). Any modifications of these items place them in another category Modified Off-the-Shelf (MOTS) items. OTS items commonly used include operating systems, processor and device microcode, and libraries of functions. It is becoming prohibitively expensive to custom develop software for these applications. In addition, the desire to not reinvent the wheel is strong, especially when faced with budget and schedule constraints. There is also a strong trend in government to use commercial products, instead of custom developing similar but much more expensive products. 
HYPERLINK \l "_7.1_Off-the-Shelf_Software"Section 7.1 Off-the-Shelf Software covers the pros and cons of OTS and reused software in more detail. Many issues need to be considered before making the decision to use OTS software, or to reuse software from a previous project. While OTS software may appear cost-effective, the additional analyses, tests, glueware code development, and other activities may make it more expensive than developing the software in-house. The section also provides recommendations for additional analyses and tests for OTS software in safety-critical systems. 4.3.4 Selection of development tools and operating systems It is at the architectural design phase that the tools to develop the software, as well as the operating system it will run on, are often selected. The choice of tools/OS can have an impact on the safety of the software. Some operating systems have more safety features than others. Some tools make finding errors easier. Suggestions for what to look for when selecting an operating system, programming language, and development tool are included in  HYPERLINK \l "_6._Programming_Languages_1"  Section 6 SOFTWARE DEVELOPMENT ISSUES . 4.3.5 Coding Standards Coding Standards can be considered a class of generic software requirements that indicate what software constructs, library functions, and other language-specific information must or must not be used. As such, they are, in practice, safe subsets of programming languages. Coding standards may be developed by the software designer, based on the software and hardware system to be used, or may be general standards for a safer version of a particular language. How compilers work internally (convert the higher level language into machine operations) is often undefined and highly variable between compilers. For example, how dynamic memory allocation is implemented is not part of most language specifications, and therefore varies between compilers. 
Even for a specific compiler, information on how such a process is implemented is difficult to obtain. The location of the allocated memory is usually not predictable, and that may not be acceptable for a safety critical software module. Another example is the order that global items are initialized. Coding standards can be used to make sure that no global item depends on another global item having been already initialized. It is important that all levels of the project agree to the coding standards, and that they are enforced. If the programmers disagree, they may find ways to circumvent it. Safety requires the cooperation of everyone. Include those who will actually do the programming, as well as software designers and software leads, in any meetings where coding standards will be discussed. Coding standards may also contain requirements for how the source code is to look, such as indentation patterns and comment formats. However, it is best to separate these requirements into a separate coding style document. This avoids the problem of developers shelving the coding standards, because they disagree with the coding style. While trivial in many ways, coding style requirements help make the software more readable by other developers. In addition, they make it more difficult for typical errors to be inserted in the code, and easier for them to be found during an inspection. Create a checklist from the agreed-upon coding standard, for use during software formal inspections and informal reviews. Enforce conformance to the standard during the inspections. Do not rate style issues as highly as safety issues. In fact, style issues can be ignored, unless they seriously impact on the readability of the code, or the project decides that they must be enforced. Benefit-to-Cost Rating: HIGH 4.3.6 Test Plan Update At this development phase, the main modules (units) of the software have been defined. 
This is the time to determine the integration order of the units, and the integration tests that will be run. Update the Test Plan with this information. 4.4 Detailed Design Phase The following tasks during the detailed design phases should support software safety activities. Program Set Architecture Show positions and functions of safety-critical modules in design hierarchy. Identify interfaces of safety-critical components. Identify hazardous operations scenarios. Internal Program Set Interfaces Include information on functional interfaces of safety-critical modules. Include low level requirements and designs of these interfaces. Identify Shared Data Identify databases/data files which contain safety-critical data and all modules which use them, safety-critical or not. Document how this data is protected from inadvertent use or changes by non-safety-critical modules. Functional Allocation Document how each safety-critical module can be traced back to original safety requirements and how the requirement is implemented. Specify safety-related design and implementation constraints. Document execution control, interrupt* characteristics, initialization, synchronization, and control of the modules. Include any finite state machines. * For high risk systems, interrupts should be avoided as they may interfere with software safety controls developed especially for a specific type of hazard. Any interrupts used should be priority based. Error Detection and Recovery Specify any error detection or recovery schemes for safety-critical modules. Include response to language generated exceptions; also responses to unexpected external inputs, e.g. inappropriate commands, or out-of-limit measurements. Inherited or Reused Software and COTS Describe the results of hazard analyses performed on COTS or inherited or reused software. Ensure that adequate documentation exists on all software where it is to be used in critical applications. 
Design Feasibility, Performance, and Margins Show how the design of safety-critical modules is responsive to safety requirements. Include information from any analyses of prototypes or simulations. Define design margins of these modules. Integration Specify any integration constraints or caveats resulting from safety factors. Show how safety controls are not compromised by integration efforts. Interface Design For each interface specify a design that meets the safety requirements in the ICD, SIS document, or equivalent. Minimize interfaces between critical modules. Identify safety-critical data used in interfaces. Modularity New modules and sub-modules shall be categorized as safety critical or not. Traceability For each of the above, identify traceability to safety requirements. Testing Identify test and/or verification methods for each safety critical design feature in the software test plan and procedures. Results of preliminary tests of prototype code should be evaluated and documented in the Software Development Folders (SDFs). Any Safety Critical findings should be reported to the Safety Engineer to help work out any viable solutions. 4.5 Software Implementation It is during software implementation (coding) that software controls of safety hazards are actually implemented. Safety requirements have been passed down through designs to the coding level. Managers and designers must communicate all safety issues relating to the program sets and code modules they assign to programmers. Safety-critical designs and coding assignments should be clearly identified. Programmers must recognize not only the explicit safety-related design elements but should also be cognizant of the types of errors which can be introduced into non-safety-critical code which can compromise safety controls. Coding checklists should be provided to alert for these common errors. This is discussed in the next section. 
4.5.1 Coding Checklists Software developers should use coding checklists in software coding and implementation. The coding checklists should be generated by an overall ongoing Specification Activity beginning in the requirements phase (see  HYPERLINK \l "_5.1.3_Specification_Analysis" Section 5.1.3 Specification Analysis). Checklists should contain questions that can serve as reminders to programmers to look for common defects. Safety Checklists During this phase, software safety checklists should be used to verify that safety requirements identified earlier in the design process (as described in previous  HYPERLINK \l "_4.2.1_Development_of" Section 4.2.1 Development of Software Safety Requirements) have, in fact, been flowed into the software detailed design. Code Review Checklists These checklists are for use by the developer, when reviewing her own code or the code of a fellow developer. The checklist should include common errors as well as the coding standards. Personalize the checklist to include individual common errors, such as forgetting to end a comment block. This serves as a means to find those errors, and as a reminder not to make the same mistakes. Additional, more formal checklists should be used during Formal Inspections. These will emphasize the coding standards and the most common errors. Common errors may be gleaned from the individual developers, from this guidebook, or from other sources such as textbooks and articles. Updating requirements Often during this development phase, missing requirements are identified, or new system requirements are added and flowed down to software, such as fault detection and recovery. It may become apparent that various capabilities were assumed, but not explicitly required, so were not implemented. Checklists can help identify these missing requirements. 
Once missing requirements are identified they must be incorporated by back-filling (updating) the requirements specifications prior to implementation in order to maintain proper configuration control. This is less likely to be necessary if Formal Methods or Formal Inspections are used, at least during the requirements phase. Benefit-to-Cost Rating: HIGH 4.5.2 Defensive Programming Hazards can be mitigated (but not controlled) using defensive programming techniques. This incorporates a degree of fault/failure tolerance into the code, sometimes by using software redundancy or stringent checking of input and output data and commands. However, software alone cannot achieve true system redundancy and failure tolerance. Appropriately configured hardware is necessary. An example of defensive programming is sometimes called come from checks. Critical routines have multiple checks in them to test whether they should be executing at some particular instant. One method is for each preceding process to set a flag in a word. If all the proper criteria are met then the routine in question is authorized to execute.  HYPERLINK \l "_6.15_Good_Programming" Section 6.15 Good Programming Practices for Safety provides more examples of defensive programming techniques. Benefit-to-Cost Rating: HIGH 4.5.3 Refactoring Refactoring is a technique to restructure object-oriented code in a disciplined way. It is the process of taking an object design and rearranging it in various ways to make the design more flexible and/or reusable. There are several reasons you might want to do this, efficiency and maintainability being probably the most important. The term refactoring comes from mathematics, where you factor an expression into an equivalence. The factors are cleaner ways of expressing the same statement. In software refactoring, there must also be equivalence; the beginning and end products must be functionally identical. 
Practically, refactoring means making code clearer, cleaner, simpler and elegant. Refactoring is a good thing, because complex expressions are typically built from simpler, more easily understood components. Refactoring either exposes those simpler components or reduces them to the more efficient complex expression (depending on which way you are going). A common method of refactoring is to refactor along inheritance lines. For instance, in a design review you find out that two classes in your system that do not share a common superclass both implement very similar or identical behavior. It would be advantageous to then refactor these two classes by moving the common behavior into a shared superclass, and then changing the classes so that they descend from that class. You can also refactor along composition lines. If you find that a class is implementing two different sets of responsibilities that do not interact with each other much, or that use two subsets of the attributes of the original class, you may want to refactor that class into two different classes, one of which perhaps contains the other. You also want to refactor when the code is broken, but it isn't broken on the particular axis called "what results the program computes". Some of the defects that can be fixed by refactoring are: Unify duplicate code Make the code read better (typically introducing more "why") Remove unused indirection Isolate existing logic from a needed change Replace one algorithm with another Make the program run faster Be careful if using refactoring, however. Regression tests should show if the refactored unit is functionally identical to the original. If it is not, then the code has been broken by the refactoring, or was broken originally and is now corrected. Either way, you now have a problem that needs to be fixed. Too much refactoring may also invalidate code inspections that were done on the original code. 
Refactoring safety critical units should be followed by a re-inspection of the code, preferably by the original Formal Inspection team. Benefit-to-Cost Rating: MEDIUM 4.5.4 Unit Level Testing Test planning starts in the specification phase. This is where the system functional tests are developed, at a high level of understanding. The functional test execution begins at the end of integration testing. Integration test planning begins during the design phase, when the individual units are defined. Integration tests are executed once the actual unit integration begins. Integration and functional testing are described below in Section 4.6. Unit level testing is planned during the detailed design phase, when the functions within a unit are defined, and executed once the code compiles. Unit level testing is important because it can access levels of the software that might not be reachable once the units are integrated. This testing is usually performed by the developer, though another developer in the group may perform the unit testing. A basic entry criterion for unit testing is that the unit compile without errors. Unit level testing can identify implementation problems requiring changes to the software. Unit level tests come in two varieties: white-box and black-box. White-box tests include all those where you must know something about the innards of the module. Black-box tests check the inputs/outputs of the module only, and aren't concerned about what happens inside. White-box tests include those that check the path/branch coverage, loop implementation and statement execution. Black-box tests look at the input domain (values), output ranges, and error handling. Path tests verify that each path through the unit has been tested. Each decision point (if statement, case statement, etc.) leads to two or more paths. 
Not every combination of paths needs to be tested (that would take too long), but each path decision should be tested for all valid possibilities, and the paths they lead to. Statement execution means that each statement in the program is executed at least once. This is usually done as part of the path tests, but separate tests can be run as well. Loops are tested by bypassing them, one pass through, a typical number of loops, one less than the maximum, the maximum number of loops, and one more than the maximum. Many of these tests are at the boundaries of the loop range, which are common sources of problems. Often loops are executed one too few or one too many times. Boundary values are also used in the input black-box tests. In these, the values that can be input to the unit are tested at typical values, the boundaries, and invalid values. In addition, black-box tests can be created to test the capability, stability, and graceful behavior under error conditions of the unit. Safety tests should be designed for any safety critical units. These tests should demonstrate coverage of the safety test requirements (see HYPERLINK \l "_4.6.7_Software_Safety"Section 4.6.7 Software Safety Testing). Each safety test should have pass/fail criteria. The test plan or software management plan should describe the review and reporting process for safety-critical components, including how to report problems discovered in unit test. A test report should be written for unit tests of safety critical items. Unit tests for object-oriented software consist of testing the class methods in the same way that procedures and functions are tested in structured programs. Construction, destruction, and copying of the class should also be tested. It is good practice for the developer to keep a list of bugs found while unit testing. This list is a learning tool for the developer to discover what his common errors are. 
Feed those errors back into the code review checklist, so they are found earlier (at review, rather than test) in the development cycle. In a team where the developers are not penalized for the defects they find, sharing the bug list with other developers and the QA department can help in education and process improvement. Developers can learn from the mistakes of others, and the metrics derived from them can help identify areas to focus on when trying to improve the software development process. Benefit-to-Cost Rating: HIGH 4.6 Software Integration and Test Software testing beyond the unit level is usually handled by someone other than the developer, except in the smallest of teams. These tests are almost exclusively black-box tests, where the inner workings of the units are unknown. All that is known is the interface (inputs, outputs, and their ranges). The test phases are: Integration Testing Unit integration and testing Integration of the software with the hardware System Testing Functional testing Performance testing Load testing Stress testing Disaster testing Stability testing Acceptance testing Red Team testing Software testing verifies analysis results, investigates program behavior, and confirms that the program complies with safety requirements. Testing is the operational execution of a software component in a real or simulated environment. Integration testing is often done in a simulated environment, and system testing is usually done on the actual hardware. However, hazardous commands or operations should be tested in a simulated environment first. You dont want to start the rocket engines or set off the ordnance by accident! Testing, conducted in accordance with the safety test plan and procedures, will verify that the software meets safety requirements. Normally, the software testing ensures that the software performs all required functions correctly, and can exhibit graceful behavior under anomalous conditions. 
Safety testing focuses on locating program weaknesses and identifying extreme or unexpected situations that could cause the software to fail in ways that would violate safety requirements. Safety testing complements rather than duplicates developer testing. Fault injection has been successfully used to test critical software (e.g. TUV in Germany). Faults are inserted into code before starting a test and the response is observed. In addition, all boundary and performance requirements should be tested at, below, and above the stated limits. It is necessary to see how the software system performs, or fails, outside of supposed operational limits. The safety testing effort should be limited to those software requirements classed as safety-critical items. In addition, if the safety-critical items are separated from the others via a partition or firewall, the integrity of the partitioning must be tested. Safety testing can be performed as an independent series of tests or as an integral part of the developer's test effort. However, remember that any software which impacts or helps fulfill a safety critical function is safety critical as well. Any problems discovered during testing should be analyzed and documented in discrepancy reports as well as test reports. Discrepancy reports contain a description of the problems encountered, recommended solutions, and the final disposition of the problem. In addition, defect information may be kept in a defect tracking database. Such a database not only allows tracking of problems for a particular project, but can serve as a source for lessons learned and improvement for subsequent projects. 4.6.1 Testing Techniques All of this section is taken, with permission, from the Frequently Asked Questions (FAQ) created by Rick Hower, 1996-2000. The website is Software QA and Testing Frequently-Asked-Questions,  HYPERLINK "http://www.softwareqatest.com/" http://www.softwareqatest.com/, and is an excellent introduction to software testing. 
What kinds of testing should be considered? Black box testing - not based on any knowledge of internal design or code. Tests are based on requirements and functionality. White box testing - based on knowledge of the internal logic of an application's code. Tests are based on coverage of code statements, branches, paths, conditions. unit testing - the most 'micro' scale of testing; to test particular functions or code modules. Typically done by the programmer and not by testers, as it requires detailed knowledge of the internal program design and code. Not always easily done unless the application has a well-designed architecture with tight code; may require developing test driver modules or test harnesses. incremental integration testing - continuous testing of an application as new functionality is added; requires that various aspects of an application's functionality be independent enough to work separately before all parts of the program are completed, or that test drivers be developed as needed; done by programmers or by testers. integration testing - testing of combined parts of an application to determine if they function together correctly. The 'parts' can be code modules, individual applications, client and server applications on a network, etc. This type of testing is especially relevant to client/server and distributed systems. functional testing - black-box type testing geared to functional requirements of an application; this type of testing should be done by testers. This doesn't mean that the programmers shouldn't check that their code works before releasing it (which of course applies to any stage of testing.) system testing - black-box type testing that is based on overall requirements specifications; covers all combined parts of a system. 
end-to-end testing - similar to system testing; the 'macro' end of the test scale; involves testing of a complete application environment in a situation that mimics real-world use, such as interacting with a database, using network communications, or interacting with other hardware, applications, or systems if appropriate. sanity testing - typically an initial testing effort to determine if a new software version is performing well enough to accept it for a major testing effort. For example, if the new software is crashing systems every 5 minutes, bogging down systems to a crawl, or destroying databases, the software may not be in a 'sane' enough condition to warrant further testing in its current state. regression testing - re-testing after fixes or modifications of the software or its environment. It can be difficult to determine how much re-testing is needed, especially near the end of the development cycle. Automated testing tools can be especially useful for this type of testing. acceptance testing - final testing based on specifications of the end-user or customer, or based on use by end-users/customers over some limited period of time. load testing - testing an application under heavy loads, such as testing of a web site under a range of loads to determine at what point the system's response time degrades or fails. stress testing - term often used interchangeably with 'load' and 'performance' testing. Also used to describe such tests as system functional testing while under unusually heavy loads, heavy repetition of certain actions or inputs, input of large numerical values, large complex queries to a database system, etc. performance testing - term often used interchangeably with 'stress' and 'load' testing. Ideally 'performance' testing (and any other 'type' of testing) is defined in requirements documentation or QA or Test Plans. usability testing - testing for 'user-friendliness'. 
Clearly this is subjective, and will depend on the targeted end-user or customer. User interviews, surveys, video recording of user sessions, and other techniques can be used. Programmers and testers are usually not appropriate as usability testers. install/uninstall testing - testing of full, partial, or upgrade install/uninstall processes. recovery testing - testing how well a system recovers from crashes, hardware failures, or other catastrophic problems. security testing - testing how well the system protects against unauthorized internal or external access, willful damage, etc; may require sophisticated testing techniques. compatibility testing - testing how well software performs in a particular hardware/software/operating system/network/etc. environment. user acceptance testing - determining if software is satisfactory to an end-user or customer. comparison testing - comparing software weaknesses and strengths to competing products. alpha testing - testing of an application when development is nearing completion; minor design changes may still be made as a result of such testing. Typically done by end-users or others, not by programmers or testers. beta testing - testing when development and testing are essentially completed and final bugs and problems need to be found before final release. Typically done by end-users or others, not by programmers or testers. What steps are needed to develop and run software tests? The following are some of the steps to consider: Obtain requirements, functional design, and internal design specifications and other necessary documents Obtain budget and schedule requirements Determine project-related personnel and their responsibilities, reporting requirements, required standards and processes (such as release processes, change processes, etc.) 
Identify application's higher-risk aspects, set priorities, and determine scope and limitations of tests Determine test approaches and methods - unit, integration, functional, system, load, usability tests, etc. Determine test environment requirements (hardware, software, communications, etc.) Determine testware requirements (record/playback tools, coverage analyzers, test tracking, problem/bug tracking, etc.) Determine test input data requirements Identify tasks, those responsible for tasks, and labor requirements Set schedule estimates, timelines, milestones Determine input equivalence classes, boundary value analyses, error classes Prepare test plan document and have needed reviews/approvals Write test cases Have needed reviews/inspections/approvals of test cases Prepare test environment and testware, obtain needed user manuals/reference documents/configuration guides/installation guides, set up test tracking processes, set up logging and archiving processes, set up or obtain test input data Obtain and install software releases Perform tests Evaluate and report results Track problems/bugs and fixes Retest as needed Maintain and update test plans, test cases, test environment, and testware through life cycle What's a 'test case'? A test case is a document that describes an input, action, or event and an expected response, to determine if a feature of an application is working correctly. A test case should contain particulars such as test case identifier, test case name, objective, test conditions/setup, input data requirements, steps, and expected results. Note that the process of developing test cases can help find problems in the requirements or design of an application, since it requires completely thinking through the operation of the application. For this reason, it's useful to prepare test cases early in the development cycle if possible. What should be done after a bug is found? The bug needs to be communicated and assigned to developers that can fix it. 
After the problem is resolved, fixes should be re-tested, and determinations made regarding requirements for regression testing to check that fixes didn't create problems elsewhere. If a problem-tracking system is in place, it should encapsulate these processes. A variety of commercial problem-tracking/management software tools are available (see the 'Tools' section for web resources with listings of such tools). The following are items to consider in the tracking process: Complete information such that developers can understand the bug, get an idea of its severity, and reproduce it if necessary. Bug identifier (number, ID, etc.) Current bug status (e.g., 'Released for Retest', 'New', etc.) The application name or identifier and version The function, module, feature, object, screen, etc. where the bug occurred Environment specifics, system, platform, relevant hardware specifics Test case name/number/identifier One-line bug description Full bug description Description of steps needed to reproduce the bug if not covered by a test case or if the developer doesn't have easy access to the test case/test script/test tool Names and/or descriptions of file/data/messages/etc. used in test File excerpts/error messages/log file excerpts/screen shots/test tool logs that would be helpful in finding the cause of the problem Severity estimate (a 5-level range such as 1-5 or 'critical'-to-'low' is common) Was the bug reproducible? Tester name Test date Bug reporting date Name of developer/group/organization the problem is assigned to Description of problem cause Description of fix Code section/file/module/class/method that was fixed Date of fix Application version that contains the fix Tester responsible for retest Retest date Retest results Regression testing requirements Tester responsible for regression tests Regression testing results A reporting or tracking process should enable notification of appropriate personnel at various stages. 
For instance, testers need to know when retesting is needed, developers need to know when bugs are found and how to get the needed information, and reporting/summary capabilities are needed for managers. What if there isn't enough time for thorough testing? Use risk analysis to determine where testing should be focused. Since it's rarely possible to test every possible aspect of an application, every possible combination of events, every dependency, or everything that could go wrong, risk analysis is appropriate to most software development projects. This requires judgment skills, common sense, and experience. (If warranted, formal methods are also available.) Considerations can include: Which functionality is most important to the project's intended purpose? Which functionality is most visible to the user? Which functionality has the largest safety impact? Which functionality has the largest financial impact on users? Which aspects of the application are most important to the customer? Which aspects of the application can be tested early in the development cycle? Which parts of the code are most complex, and thus most subject to errors? Which parts of the application were developed in rush or panic mode? Which aspects of similar/related previous projects caused problems? Which aspects of similar/related previous projects had large maintenance expenses? Which parts of the requirements and design are unclear or poorly thought out? What do the developers think are the highest-risk aspects of the application? What kinds of problems would cause the worst publicity? What kinds of problems would cause the most customer service complaints? What kinds of tests could easily cover multiple functionalities? Which tests will have the best high-risk-coverage to time-required ratio? 
4.6.2 Test Setups and Documentation Testing should be performed either in a controlled environment in which execution follows a structured test procedure and the results are monitored, or in a demonstration environment where the software is exercised without interference. Controlled testing executes the software on a real or a simulated computer using special techniques to influence behavior. This is the usual mode of testing, where a test procedure (script) is developed and followed, and the results are noted. Automatic testing is also included in this category. All of the integration and system tests that will be discussed in the following sections are controlled tests. When using a simulator, rather than the real system, the fidelity of the simulators should be carefully assessed. How close is the simulator to the real thing? How accurate is the simulator? Has the simulator itself been verified to operate correctly? Demonstration testing executes the software on a computer and in an environment identical to the operational computer and environment. Demonstrations may be used in the acceptance test, to show the user how the system works. Autonomous systems, where the internal operation is completely under the control of the software, would also be demonstrated, especially for the acceptance test. Safety testing exercises program functions under both nominal and extreme conditions. Safety testing includes nominal, stress, performance, load and error-handling testing. These tests are discussed in  HYPERLINK \l "_4.6.4_System_Testing" Section 4.6.5 System Tests. Additional safety tests may be developed to test the software under specific conditions (e.g. unexpected loss of power) or for specific results (the door doesn't open when the furnace is on, no matter what command is issued to the software). Configuration Management should act as the sole distributor of media and documentation for all system tests and for delivery to [sub]system integration and testing. 
Pulling the latest program off the developer's machine is not a good idea. One aspect of system testing is repeatability, which can only be assured if the software under test comes from a known, and fixed, source. Software Test Reports should incorporate software safety information in the following sections: Unit Testing Results Report results from unit tests in the Software Development Folders. Ideally, all results should be documented, even if just in notes in a log book. Safety critical units should have more formal documentation (test reports). System Test Reports Report results of testing safety-critical interfaces versus the requirements outlined in the Software Test Plan. Any Safety Critical findings should be used to update the hazard reports. 4.6.3 Integration Testing Integration is the process of piecing together the puzzle, where each piece is a software unit. The end result is a complete system, and the final integration test is essentially the first system functional test. The order of integration of the units should be decided at the end of the architectural design, when the units are identified. Various schemes can be used, such as creating a backbone (minimal functionality) and adding to it, or doing the most difficult modules first. Keep in mind the hardware schedule when deciding the order of integration. Late hardware may hold up software integration, if the software that needs it is integrated early in the cycle. Stubs and drivers are used to simulate the rest of the system, outside of the integrated section. Stubs represent units below (called by) the integrated section. Drivers represent the part of the software that calls the integrated section. Integration tests are black-box tests that verify the functionality of the integrated unit. They are higher level black-box unit tests, where the unit is the new, integrated whole. Special safety tests may be run as safety critical units are integrated. 
These tests should exercise functionality that may be unavailable once the system is completely integrated. Also, some safety tests may be run early, so that any problems can be corrected before development is completed. 4.6.4 Object Oriented Testing Object-oriented software requires some changes in the testing strategy, prior to the full system tests. Once the software is fully integrated, it doesn't matter what the underlying software design is. A system is a system. However, when the system is integrated, whole classes are added at a time. Besides the normal functional tests that are performed during integration testing, consider the following tests as well: Object A creates Object B, invoking B's constructor. A deletes B, invoking B's destructor. Check for memory leaks here! A sends a message to B (invokes a method of B). Check for situations where B is not present (not created or already destroyed). How does the error handling system deal with that situation? As each class is integrated into the system, it is the interfaces with other classes that must be tested. Object-oriented testing methods have not reached the maturity of more traditional testing. The best way to test OO software is highly debated. The following resources provide some useful insights or links regarding OO testing: Testing Object-Oriented Software (book), by David C. Kung, Pei Hsia, and Jerry Gao, October 1998. 
ISBN 0-8186-8520-4 Practical Techniques for Testing Objects, Powerpoint Presentation,  HYPERLINK "http://www.cigital.com/presentations/testing_objects/sld001.htm" http://www.cigital.com/presentations/testing_objects/sld001.htm Bibliography: Testing Object-Oriented Software  HYPERLINK "http://www.rbsc.com/pages/ootbib.html" http://www.rbsc.com/pages/ootbib.html Cetus links on Object-Oriented Testing:  HYPERLINK "http://www.cetus-links.org/oo_testing.html" http://www.cetus-links.org/oo_testing.html Myths about OO testing:  HYPERLINK "http://www.rbsc.com/pages/myths.html" http://www.rbsc.com/pages/myths.html State of the art in 1995:  HYPERLINK "http://www.stsc.hill.af.mil/crosstalk/1995/apr/testinoo.asp" http://www.stsc.hill.af.mil/crosstalk/1995/apr/testinoo.asp 4.6.5 System Testing System testing begins when the software is completely integrated. Several types of tests are usually run. Not every test is useful for every system, and the software developer should choose those that test specific requirements or that may show problems with the system. Functional Testing consists of running the system in a nominal manner. The system is verified to perform all the functions specified in the requirements, and to not perform functions that are designated must not work. A complete, end-to-end functional test is often designated as the System Test, though system testing actually encompasses many more types of tests. A scaled-down version of the functional test is often used as the acceptance test. Benefit-to-Cost Rating: HIGH Stress tests are designed to see how much the system can handle before it breaks down. While a capacity test (performance) may test that the system can store the required number of files, a stress test will see just how many files can be stored before the disk runs out of room. Aspects of the system that might be stressed are CPU usage, I/O response time, paging frequency, memory utilization, amount of available memory, and network utilization. 
The closer a system's peak usage is to the breakdown point, the more likely it is that the system will fail under usage. Give yourself adequate margin, if at all possible. Benefit-to-Cost Rating: HIGH Stability tests look for sensitivity to event sequences, intermittent bad data, memory leakage, and other problems that may surface when the system is operated for an extended period of time. Benefit-to-Cost Rating: HIGH Resistance to Failure tests how gracefully the software responds to errors. Errors should be detected and handled locally. Erroneous user input should be handled appropriately (e.g. ignored with an error message), as well as bad input from sensors or other devices. Fault injection tests fit in this category. Benefit-to-Cost Rating: HIGH Compatibility tests verify that the software can work with the hardware and other software systems it was designed to interface with. Benefit-to-Cost Rating: HIGH Performance testing verifies the CARAT parameters — Capacity, Accuracy, Response time, Availability, and Throughput. Capacity is the number of records, users, disk space, etc. Accuracy is the verification of algorithm results and precision. Response time is often important in real-time systems, and will be included in the specifications. Availability is how long the system can be used (based on how often it fails, and how long to repair it when it does fail). Throughput is the peak or average number of events per unit time that the system can handle. Load tests are a form of performance testing. Benefit-to-Cost Rating: HIGH Disaster testing checks the software's response to physical hardware failure. Pulling the power plug while the software is running is one example. Disaster tests point to areas where the software needs fixing, or where additional hardware is needed (such as a backup battery, to allow the software to shut down gracefully). Benefit-to-Cost Rating: MEDIUM Installation test shows that the software can be installed successfully. 
Benefit-to-Cost Rating: MEDIUM Red Team testing is a totally unscripted, "break the code" type of testing. It is only performed after all other testing is completed, and in an environment where a true safety problem cannot develop (such as on a simulator). The testers do whatever they want to the software except actually break the hardware it runs on (or related hardware). This is a random test. Successful completion suggests that the program is robust. Failure indicates that something needs to be changed, either in the software or in the operating procedures. Benefit-to-Cost Rating: LOW (but fun!) 4.6.6 Regression Testing Whenever changes are made to the system after it is baselined (first release), a regression test must be run to verify that previous functionality is not affected and that no new errors have been added. This is vitally important! Fixed code may well add its own set of errors to the code. If the system is close to some capacity limit, the corrected code may push it over the edge. Performance issues, race conditions, or other problems that were not evident before may be there now. If time permits, the entire suite of system and safety tests would be rerun as the complete regression test. However, for even moderately complex systems, such testing is likely to be too costly in time and money. Usually, a subset of the system tests makes up the regression test suite. Picking the proper subset, however, is an art and not a science. Minimization is one approach to regression test selection. The goal is to create a regression test suite with the minimal number of tests that will cover the code change and modified blocks. The criterion for this approach is coverage — what statements are executed by the test. In particular, every statement in the changed code must be executed, and every modified block must have at least one test. 
Coverage approaches are based on coverage criteria, like the minimization approach, but they are not concerned about minimizing the number of tests. Instead, all system tests that exercise the changed or affected program component(s) are used. Safe approaches place less emphasis on coverage criteria, and attempt instead to select every test that will cause the modified program to produce different output than the original program. Safe regression test selection techniques select subsets that, under certain well-defined conditions, exclude no tests (from the original test suite) that if executed would reveal faults in the modified software.  HYPERLINK \l "_5.4.11_Program_Slicing" Program slicing can be a helpful technique for determining what tests to run. Slicing finds all the statements that can affect a variable, or all statements that a variable is involved with. Depending on the changes, slicing may be able to show what modules may be affected by the modification. Whatever strategy is used to select the regression tests, it should be a well thought out process. Balance the risks of missing an error with the time and money spent on regression testing. Very minor code changes usually require less regression testing, unless they are in a very critical area of the software. Also consider including in the regression suite tests that previously found errors, tests that stress the system, and performance tests. You want the system to run at least as well after the change as it did before the change! For safety critical code, or software that resides on the same platform as safety critical code, the software safety tests must be repeated, even for minor changes.  4.6.7 Software Safety Testing Developers must perform software safety testing to ensure that hazards have been eliminated or controlled to an acceptable level of risk. Also, document safety-related test descriptions, procedures, test cases, and the associated qualifications criteria. 
Implementation of safety requirements (inhibits, traps, interlocks, assertions, etc.) shall be verified. Verify that the software functions safely both within its specified environment (including extremes), and under specified abnormal and stress conditions. For example, two-failure-tolerant systems should be exercised in all predicted credible two-failure scenarios. IEEE 1228-1994, Software Safety Plans, specifies that the following software safety tests may be performed: Computer software unit level testing that demonstrates correct execution of critical software elements. Interface testing that demonstrates that critical computer software units execute together as specified. Computer software configuration item testing that demonstrates the execution of one or more system components. System-level testing that demonstrates the software's performance within the overall system. Stress testing that demonstrates the software will not cause hazards under abnormal circumstances, such as unexpected input values or overload conditions. Regression testing that demonstrates changes made to the software did not introduce conditions for new hazards. Software Safety, usually represented by the Software Quality Assurance organization, should participate in the testing of safety-critical computer software components at all levels of testing, including informal testing, system integration testing, and Software Acceptance testing. Benefit-to-Cost Rating: HIGH 4.6.8 Test Witnessing Software safety personnel should ensure that tests of safety-critical components are conducted in strict accordance with the approved test plans, descriptions, procedures, scripts and scenarios, and that the results are accurately logged, recorded, documented, analyzed, and reported. Ensure that deficiencies and discrepancies are corrected and retested. 
In addition to testing under normal conditions, the software should be tested to show that unsafe states cannot be generated by the software as the result of feasible single or multiple erroneous inputs. This should include those outputs which might result from failures associated with the entry into, and execution of, safety-critical computer software components. Negative and No-Go testing should also be employed, and should ensure that the software only performs those functions for which it is intended, and no extraneous functions. Lists of specific tests for safety-critical software can be found in  HYPERLINK \l "_3.3_Incorporating_Software" Section 3.3 Incorporating Software Safety into Software Development in this Guidebook. Witnessing verifies that the software performs properly and safely during system integration stress testing and system acceptance testing. System acceptance testing should be conducted under actual operating conditions or realistic simulations. 4.7 Software Acceptance and Delivery Phase Once the software has passed acceptance testing it can be released either as a stand-alone item, or as part of a larger system acceptance. An Acceptance Data Package (ADP) should accompany the release of the software. This package should include, as a minimum, the following: Instructions for installing all safety-critical items. Define the hardware environment for which the software is certified. Liens identify all safety-related software liens, such as missing design features, or untested features. Constraints describe operational constraints for hazardous activities. List all open, corrected but not tested, or uncorrected safety-related problem reports. Describe environmental limitations of use, allowable operational envelope. Operational procedures describe all operational procedures and operator interfaces. Include failure diagnosis and recovery procedures. 
In addition, the ADP should contain the following: Certificate of Conformance to requirements, validated by Quality Assurance Organization, and IV & V Organization (if applicable). Waivers lists any waivers of safety requirements. Program Set Acceptance Test Results lists results of safety tests. New or Changed Capabilities lists any approved change requests for safety-critical items. Problem Disposition lists any safety-related problem reports. Version Description Document describes as built versions of software modules to be used with this release of the system. 4.8 Software Operations & Maintenance Maintenance of software differs completely from hardware maintenance. Unlike hardware, software does not degrade or wear out over time, so the reasons for software maintenance are different. The main purposes for software maintenance are as follows: to correct known defects to correct defects discovered during operation to add or remove features and capabilities (as requested by customer, user or operator) to compensate or adapt for hardware changes, wear out or failures. The most common safety problem during this phase is lack of configuration control, resulting in undocumented and poorly understood code. Patching is a common improper method used to fix software on the fly. Software with multiple undocumented patches has resulted in major problems where it has become completely impossible to understand how the software really functions, and how it responds to its inputs. In some cases, additional software has been added to compensate for unexpected behavior which is not understood. ( for example, garbage collectors). It is beneficial to determine and correct the root cause of unexpected behavior, otherwise the software can grow in size to exceed available resources, or become unmanageable. After software becomes operational, rigorous configuration control must be enforced. 
For any proposed software change, it is necessary to repeat all life cycle development and analysis tasks performed previously from requirements (re-) development through code (re-)test. Full original testing is recommended as well as any additional tests for new features. It is advisable to perform the final verification testing on an identical off-line analog (or simulator) of the operational software system, prior to placing it into service. 5. SOFTWARE SAFETY ANALYSIS Safety is an integral part of the software life-cycle, from the specification of safety-related requirements, through inspection of the software controls, and into verification testing for hazards. Within each life-cycle phase, the safety engineer performs various analysis tasks. If problems are found, they are fed back through the system until they are corrected or mitigated. While finding unsafe elements of the system is often the focus of the analyses, a negative analysis (no hazards or major problems) can give the project assurance that they are on the right path to a safe system. This section describes various techniques that have been useful in NASA activities and within industry. How to do the analysis (the methodology), what are the inputs, and what products are generated, is described in each case. Wherever possible, checklists are provided to aid in the use of the technique. Analysis techniques fall into two categories: Top down system hazards and failure analyses, which look at possible hazards or faults and trace down into the design to find out what caused them. Bottom up review of design products to identify failure modes not predicted by top down analysis. This will ensure validity of assumptions of top down analysis, and verify conformance to requirements. Typically, both types of analyses will be used in a typical software safety analysis activity, though the specific techniques used will be tailored for the project. 
A benefit-to-cost rating is given for each technique, to assist in the planning of software safety activities. The rating is HIGH (benefits far outweigh costs), MEDIUM (less benefits or higher cost) and LOW (high cost for what benefits you get). This scale is subjective and meant to be only one consideration when choosing analysis techniques. Results of software safety analysis are reported back to the system safety organization for integration in the system safety plan. For example, a new software hazard may require changes to the hardware configuration to mitigate it. Or an analysis may show that software can contain a hazard sufficiently to allow it to be a control. As the software becomes more defined within the software life cycle, individual program sets, modules, units, etc. are identified that are safety-critical. The analyses used vary with the phase of development, building on previous analyses or using the new level of software definition to refine the safety analysis. Each set of analyses are described in the following sections: 5.1  HYPERLINK \l "_5.1_Software_Safety" Software Safety Requirements Analysis 5.2  HYPERLINK \l "_5.2_Architectural_Design" Architectural Design Analysis 5.3  HYPERLINK \l "_5.3_Detailed_Design" Detailed Design Analysis 5.4 HYPERLINK \l "_5.4_Code_Analysis"Code Analysis 5.5 HYPERLINK \l "_5.5_Test_Analysis"Test Analysis 5.6  HYPERLINK \l "_5.6_Operations_&" Operations & Maintenance 5.1 Software Safety Requirements Analysis The Requirements Analysis Activity verifies that safety requirements for the software were properly flowed down from the system safety requirements, and that they are correct, consistent and complete. It also looks for new hazards, software functions that can impact hazard controls, and ways the software can behave that are unexpected. These are primarily top down analyses. 
Bottom up analysis of software requirements are performed such as Requirements Criticality Analysis to identify possible hazardous conditions. This results in another iteration of the PHA that may generate new software requirements. Specification analysis is also performed to ensure consistency of requirements. Analyses included in the Software Requirements Phase are: Software Safety Requirements Flow-down Analysis Requirements Criticality Analysis Specification Analysis Formal Inspections Timing, Throughput And Sizing Analysis Preliminary Software Fault Tree Analysis 5.1.1 Software Safety Requirements Flow-down Analysis Safety requirements are flowed down into the system requirements specifications, and from there into the subsystem specifications, as described in  HYPERLINK \l "_4.2.1.1_Safety_Requirements" Section 4.2.1.1. This includes the software subsystem requirements. Problems in the flow-down process can be caused by incomplete analysis, inconsistent analysis of highly complex systems, or use of ad hoc techniques by biased or inexperienced analysts. The following references are a good starting point for anyone who falls into the inexperienced category : MIL-STD-882C System Safety Program Requirements (the C version, not the current D, has some description on how to verify flow down of requirements) NSTS-22254 Methodology for Conduct of Space Shuttle Program Hazard Analyses Safeware : System Safety and Computers (Book), Nancy Leveson, April 1995 Safety-Critical Computer Systems (Book), Neil Storey, August 1996 Software Assessment: Reliability, Safety, Testability (Book), Michael A. Friedman and Jeffrey M. Voas(Contributor), August 16, 1995 Discovering System Requirements, A. Terry Bahill and Frank F. 
Dean,  HYPERLINK "http://tide.it.bond.edu.au/inft390/002/Resources/sysreq.htm" http://tide.it.bond.edu.au/inft390/002/Resources/sysreq.htm The most rigorous (and most expensive) method of addressing this concern is adoption of formal methods for requirements analysis and flow-down. This was described previously in  HYPERLINK \l "_4.2.4_Formal_Methods" Section 4.2.3 Formal Methods - Specification Development. Less rigorous and less expensive ways include checklists and/or a standardized structured approach to software safety as discussed below and throughout this guidebook. Benefit-to-Cost Rating: HIGH 5.1.1.1 Checklists and cross references Checklists are a tool for making sure you havent forgotten anything important, while doing an analysis or reviewing a document. They are a way to put the collective experience of those who created and reviewed the checklist to work on your project. They are a starting point, and should be reviewed for relevance for each project. A collection of checklists is provided in Appendix E. For the requirements phase, they include a safety checklist that contains standard hazards to look for when reviewing the requirements specification and a checklist of questions to think about when reviewing the PHA. Cross references are matrices that list related items. A matrix that shows the software related hazards and hazard controls and their corresponding safety requirements should be created and maintained. This should be a living document, reviewed and updated periodically. Refreshing your mind on the hazards that software must control while working on the software design, for example, increases the likelihood that the hazard controls will be designed in correctly. Another cross reference matrix would list each requirement and the technique that will verify it (analysis, test, etc.). 
You should develop a systematic checklist of software safety requirements and hazard controls, ensuring they correctly and completely include (and cross-reference) the appropriate specifications, hazard analyses, test and design documents. This should include both generic and specific safety requirements as discussed in  HYPERLINK \l "_4.2.1_Development_of" Section 4.2.1 Development of Software Safety Requirements. HYPERLINK \l "_4.2.6_Formal_Inspections"Section 4.2.5 Formal Inspections, lists some sources for starting a safety checklist. Also, develop a hazard requirements flow-down matrix which maps safety requirements and hazard controls to system/software functions and, from there, to software modules and components. Where components are not yet defined, flow to the lowest level possible and tag for future flow-down. Benefit-to-Cost Rating: HIGH 5.1.2 Requirements Criticality Analysis Criticality analysis identifies program requirements that have safety implications. A method of applying criticality analysis is to analyze the hazards of the software/hardware system and identify those that could present catastrophic or critical hazards. This approach evaluates each program requirement in terms of the safety objectives derived for the software component. The evaluation will determine whether the requirement has safety implications and, if so, the requirement is designated safety critical. It is then placed into a tracking system to ensure traceability of software requirements throughout the software development cycle from the highest level specification all the way to the code and test documentation. All of the following techniques are focused on safety critical software components. The system safety organization coordinates with the project system engineering organization to review and agree on the criticality designations. Software safety engineers and software development engineers should be included in this discussion. 
Software is a vital component in the whole system, and the software viewpoint must be part of any systems engineering activity. Requirements can be consolidated to reduce the number of critical requirements. In addition, they can be flagged for special attention during design, to reduce the criticality level. Keep in mind that not all safety critical requirements are created equal. Later in the process, the concept of risk is used to prioritize which requirements or components are more critical than others. For now, it's best to look at everything that can cause a safety problem, even a trivial one. It's easier, and cheaper, to remove or reduce requirements later than it is to add them in. It is probable that software components or subsystems will not be defined during the Requirements Phase, so those portions of the Criticality Analysis would be deferred to the Architectural Design Phase. In any case, the Criticality Analysis will be updated during the Architectural Design Phase to reflect the more detailed definition of software components. You perform the Requirements Criticality Analysis by doing the following: All software requirements are analyzed to identify additional potential system hazards that the system PHA did not reveal. A checklist of PHA hazards is a good thing to have while reviewing the software requirements. The checklist makes it easier to identify PHA-designated hazards that are not reflected in the software requirements, and new hazards missed by the PHA. In addition, look for areas where system requirements were not correctly flowed to the software. Once potential hazards have been identified, they are added to the system requirements and then flowed down to subsystems (hardware, software and operations) as appropriate. Review the system requirements to identify hardware or software functions that receive, pass, or initiate critical signals or hazardous commands. 
Review the software requirements to verify that the functions from the system requirements are included. In addition, look for any new software functions or objects that receive/pass/initiate critical signals or hazardous commands. Look through the software requirements for conditions that may lead to unsafe situations. Consider conditions such as out-of-sequence, wrong event, inappropriate magnitude, incorrect polarity, inadvertent command, adverse environment, deadlocking, and failure-to-command modes. The software safety requirements analysis considers such specific requirements as the characteristics discussed below in  HYPERLINK \l "_5.1.2.1_Critical_Software" Section 5.1.2.1 Critical Software Characteristics. The following resources are available for the Requirements Criticality Analysis:  Software Development Activities Plan [Software Development Plan] Software Assurance Plan [None], Software Configuration Management Plan [Same] and Risk Management Plan [Software Development Plan] Background information relating to safety requirements associated with the contemplated testing, manufacturing, storage, repair, installation, use, and final disposition of the system  System and Subsystem Requirements [System/Segment Specification (SSS), System/Segment Design Document] Storage and timing analyses and allocations Requirements Document [Software Requirements Specifications] Program structure documents External Interface Requirements Document [Interface Requirements Specifications] and other interface documents Information from the system PHA concerning system energy, toxic, and other hazardous event sources, especially ones that may be controlled directly or indirectly by software Functional Flow Diagrams and related data Historical data such as lessons learned from other systems and problem reports Note: documents in [parentheses] correspond to terminology from DOD-STD-2167 [2]. Other document names correspond to NASA-STD-2100.91. 
Output products from this analysis are: Table 5-1 Subsystem Criticality Matrix Updated Safety Requirements Checklist Definition of Safety Critical Requirements The results and findings of the Criticality Analyses should be fed back to the System Requirements and System Safety Analyses. For all discrepancies identified, either the requirements should be changed because they are incomplete or incorrect, or else the design must be changed to meet the requirements. The Criticality Analysis identifies additional hazards that the system analysis did not include, and identifies areas where system or interface requirements were not correctly assigned to the software. The results of the criticality analysis may be used to develop Formal Inspection (FI) checklists for performing the FI process described in  HYPERLINK \l "_4.2.5_Formal_Inspections" Section 4.2.5 Formal Inspections of Specifications. Benefit-to-Cost Rating: HIGH 5.1.2.1 Critical Software Characteristics Not all characteristics of the software are governed by requirements. Some characteristics are a result of the design, which may fulfill the requirements in a variety of ways. It is important that safety critical characteristics are identified and explicitly included in the requirements. Forgotten safety requirements often come back to bite you late in the design or coding stages. All characteristics of safety critical software must be evaluated to determine if they are safety critical. Safety critical characteristics should be controlled by requirements that receive rigorous quality control in conjunction with rigorous analysis and test. Often all characteristics of safety critical software are themselves safety critical. Characteristics to be considered include at a minimum: Specific limit ranges Out of sequence event protection requirements (e.g., if-then statements) Timing Relationship logic for limits (Allowable limits for parameters might vary depending on operational mode or mission phase. 
Expected pressure in a tank varies with temperature, for example.) Voting logic Hazardous command processing requirements (see HYPERLINK \l "_4.2.2.2__Hazardous"Section 4.2.2.2 Hazardous Commands) Fault response Fault detection, isolation, and recovery Redundancy management/switchover logic (What to switch, and under what circumstances, should be defined as methods to control hazard causes identified in the hazards analyses. For example, equipment which has lost control of a safety critical function should be switched to a good spare before the time to criticality has expired. Hot standby units (as opposed to cold standby) should be provided where a cold start time would exceed time to criticality.) This list is not exhaustive and often varies depending on the system architecture and environment. Table 5-1 Subsystem Criticality Matrix Mission Operational Control Functions HazardsIMICAICDCommunicationXXXGuidanceXNavigationXCamera OperationsXAttitude ReferenceXXXControlXXPointingXSpecial ExecutionRedundancy ManagementXMission SequencingXMode ControlXXKeyIMIInadvertent Motor IgnitionCACollision AvoidanceICDInadvertent Component DeploymentThe above matrix is an example output of a software  HYPERLINK \l "_5.1.2_Requirements_Criticality" 5.1.2 Requirements Criticality Analysis. Each functional subsystem is mapped against system hazards identified by the PHA. In this example, three hazards are addressed. This matrix is an essential tool to define the criticality level of the software. Each hazard should have a risk index as described in HYPERLINK \l "_2.3.1.2_Risk_Levels"Section 2.4.1.2 Risk Levels of this guidebook. The risk index is a means of prioritizing the effort required in developing and analyzing respective pieces of software. 5.1.3 Specification Analysis Specification analysis evaluates the completeness, correctness, consistency, and testability of software requirements. Well-defined requirements are strong standards by which to evaluate a software component. 
Specification analysis should evaluate requirements individually and as an integrated set. Techniques used to perform specification analysis are: control-flow analysis, information-flow analysis functional simulation These techniques are described in detail (plus background and theory) within a large, well established body of literature. Look in books on software testing and software engineering for further information on these techniques. A brief description of each technique will be given so that the analyst can determine if further study is warranted. The safety organization should ensure the software requirements appropriately influence the software design and the development of the operator, user, and diagnostic manuals. The safety agency should review the following documents and/or data:  System/segment specification and subsystem specifications Storage allocation and program structure documents Software requirements specifications Background information relating to safety requirements Interface requirements specifications and all other interface documents Information concerning system energy, toxic and other hazardous event sources, especially those that may be controlled directly or indirectly by software Functional flow diagrams and related data Software Development Plan, Software Quality Evaluation Plan, and Software Configuration Management Plan and Historical data 5.1.3.1 Control-flow analysis Control-flow analysis examines the order in which software functions will be performed. It identifies missing and inconsistently specified functions. Control-flow examines which processes are performed in series, and which in parallel (e.g., multitasking), and which tasks are prerequisites or dependent upon other tasks. Benefit-to-Cost Rating: HIGH 5.1.3.2 Information-flow analysis Information-flow analysis examines the relationship between functions and data. Incorrect, missing, and inconsistent input/output specifications are identified. 
Data flow diagrams are commonly used to report the results of this activity, so this technique is best used during architectural design. However, it can also be effective during fast prototyping and/or spiral life cycle models for early, basic data and command flow. Benefit-to-Cost Rating: HIGH 5.1.3.3 Functional simulation models Simulators are useful development tools for evaluating system performance and human interactions. You can examine the characteristics of a software component to predict performance, check human understanding of system characteristics, and assess feasibility. Simulators have limitations in that they are representational models and sometimes do not accurately reflect the real design, or make environmental assumptions which can differ from conditions in the field. Benefit-to-Cost Rating: MEDIUM 5.1.4 Formal Inspections Software Formal Inspections (described in  HYPERLINK \l "_4.2.6_Formal_Inspections" 4.2.5 Formal Inspections of Specifications) are an important activity for safety to participate in, especially during the requirements phase. Early in the life-cycle of the project is a very good time at which to express safety concerns and recommend changes. In addition, the safety representative can more thoroughly learn about the system and how it is supposed to work. As part of the inspection activity, a safety checklist should be created to use when reviewing the requirements. This checklist should be based on the safety requirements discussed in  HYPERLINK \l "_4.2_Software_Requirements" Section 4.2 Software Requirements Phase. The generic requirements portion of the checklist should be tailored to emphasize those areas most relevant and those most likely to be omitted or not satisfied. Reference [6] Targeting Safety-Related Errors During Software Requirements Analysis contains a good checklist relevant to the NASA environment. 
After the inspection, the safety representative should review the official findings of the inspection and translate any that require safety follow-up on to a worksheet such as that in  HYPERLINK \l "table42" Table 4-2 Subsystem Criticality Analysis Report Form. This form can then serve in any subsequent inspections or reviews as part of the checklist. It will also allow the safety personnel to track to closure safety specific issues that arise during the course of the inspection. Benefit-to-Cost Rating: HIGH 5.1.5 Timing, Throughput And Sizing Analysis Timing, throughput and sizing analysis for safety critical functions evaluates software requirements that relate to execution time, I/O data rates and memory/storage allocation. This analysis focuses on program constraints. Typical constraint requirements are maximum execution time, maximum memory usage, maximum storage size for program, and I/O data rates the program must support. The safety organization should evaluate the adequacy and feasibility of safety critical timing, throughput and sizing requirements. These analyses also evaluate whether adequate resources have been allocated in each case, under worst case scenarios. For example, will I/O channels be overloaded by many error messages, preventing safety critical features from operating. Items to consider include: memory usage versus availability; I/O channel usage (load) versus capacity and availability; execution times versus CPU load and availability; sampling rates versus rates of change of physical parameters. program storage space versus executable code size amount of data to store versus available capacity Quantifying timing/sizing resource requirements can be very difficult. Estimates can be based on the actual parameters of similar existing systems. Memory usage versus availability Assessing memory usage can be based on previous experience of software development if there is sufficient confidence. 
More detailed estimates should evaluate the size of the code to be stored in the memory, and the additional space required for storing data and scratch pad space for storing interim and final results of computations (heap size). As code is developed, particularly prototype or simulation code, the memory estimates should be updated. Consider carefully the use of Dynamic Memory Allocation in safety critical code or software that can impact on the safety critical portions. Dynamic memory allocation can lead to problems from not freeing allocated memory (memory leak), freeing memory twice (causes exceptions), or buffer overruns that overwrite code or other data areas. When data structures are dynamically allocated, they often cannot be statically analyzed to verify that arrays, strings, etc. do not go past the physical end of the structure. I/O channel usage (Load) versus capacity and availability Look at the amount of input data (science data, housekeeping data, control sensors) and the amount of output data (communications) generated. I/O channel should include internal hardware (sensors), interprocess communications (messages), and external communications (data output, command and telemetry interfaces). Check for resource conflicts between science data collection and safety critical data availability. During failure events, I/O channels can be overloaded by error messages and these important messages can be lost or overwritten. (e.g. the British Piper Alpha offshore oil platform disaster). Possible solutions includes additional modules designed to capture, correlate and manage lower level error messages or errors can be passed up through the calling routines until at a level which can handle the problem; thus, only passing on critical faults or combinations of faults, that may lead to a failure. Execution times versus CPU load and availability Investigate the time variations of CPU load and determine the circumstances that generate peak load. 
Is the execution time under high load conditions acceptable? Consider the timing effects from multitasking, such as message passing delays or the inability to access a needed resource because another task has it. Note that excessive multitasking can result in system instability leading to crashes. Also consider whether the code will execute from RAM or from ROM, which is often slower to access. Sampling rates versus rates of change of physical parameters Design criteria for this is discussed in Section 4.2.2.5 Timing, Sizing and Throughput Considerations. Analysis should address the validity of the system performance models used, together with simulation and test data, if available. Program storage space versus executable code size Estimate the footprint of the executable software in the device it is stored in (EPROM, flash disk, etc.). This is may be less than the memory footprint, as only static or global variables take up space. However, if not all modules will be in memory at the same time, then the executable size may be larger. The program size includes the operating system as well as the application software. Amount of data to store versus available capacity Consider how much science, housekeeping, or other data will be generated and the amount of storage space available (RAM, disk, etc.). If the data will be sent to the ground and then deleted from the storage media, then some analysis should be done to determine how often, if ever, the disk will be full. Under some conditions, being unable to save data or overwriting previous data that has not been downlinked could be a safety related problem. Benefit-to-Cost Rating: HIGH 5.1.6 Software Fault Tree Analysis It is possible for a system to meet requirements for a correct state and to also be unsafe. Complex systems increase the chance that unsafe modes will not be caught until the system is in the field. 
Fault Tree Analysis (FTA) is one method that focuses on how errors, or even normal functioning of the system, can lead to hazards. The requirements phase is the time to perform a preliminary software fault tree analysis (SFTA). This is a top down analysis, looking for the causes of presupposed hazards. The top of the tree (the hazards) must be known before this analysis is applied. The Preliminary Hazard Analysis (PHA) is the primary source for hazards, along with the Requirements Criticality Analysis and other analyses described above. The results of a fault tree analysis is a list of failures, or combination of failures, that can lead to a hazard. Some of those failures will be in software. At this top level, the failures will be very general (e.g., computer fails to raise alarm). When this analysis is updated in later phases, the failures can be assigned to specific functions or modules. FTA was originally developed in the 1960's for safety analysis of the Minuteman missile system. It has become one of the most widely used hazard analysis techniques. In some cases FTA techniques may be mandated by civil or military authorities. Most of the information presented in this section is extracted from Leveson et al. [13,14]. FTA is a complex subject, and is described further in HYPERLINK \l "_APPENDIX_B_Software"Appendix B. Benefit-to-Cost Rating: MEDIUM 5.1.7 Conclusion Using the above techniques will provide some level of assurance that the software requirements will result in a design which satisfies safety objectives. The extent to which the techniques should be used depends on the degree of criticality of the system and its risk index, as discussed in Section 3. Some of these analyses will need to be updated during design and code phases as the system becomes more defined and details are specified. The output of the Software Safety Requirements Analyses (SSRA) are used as input to follow-on software safety analyses. 
The SSRA is presented at the Software Requirements Review (SRR)/Software Specification Review (SSR)
Analyses included in the Architectural Design Phase are: Update Criticality Analysis Conduct Hazard Risk Assessment Analyze Architectural Design Interdependence Analysis Independence Analysis Update Timing/Throughput/Sizing Analysis Update Software Fault Tree Analysis Perform preliminary Software Failure Modes and Effects Analysis 5.2.1 Update Criticality Analysis At this stage of development, the software functions begin to be allocated to modules and components. Software for a system, while often subjected to a single development program, actually consists of a set of multipurpose, multifunction entities. The software functions need to be subdivided into many modules and further broken down to components. Some of these modules will be safety critical, and some will not. Each module or component that implements a safety critical requirement must now be assigned a criticality index, based on the criticality analysis (See  HYPERLINK \l "_5.1.2_Requirements_Criticality" Section 5.1.2 Requirements Criticality Analysis). The safety activity in this phase is to relate the identified hazards from the following analyses to the Computer Software Components (CSCs) that may affect or control the hazards. AnalysisGuidebook SectionPreliminary Hazard Analysis (PHA) HYPERLINK \l "_2.3_Preliminary_Hazard" 2.4Software Subsystem Hazard Analysis HYPERLINK \l "_2.5_Software_Subsystem" 2.5Software Safety Requirements Analysis HYPERLINK \l "_5.1_Software_Safety" 5.1 Develop a matrix that lists all SCCSCs and the safety requirements they relate to. Include any modules that can affect the SCCSCs as well. This would include modules that write to data in memory shared with the SCCSCs, or that provide information to the safety critical modules. The designation Safety Critical Computer Software Component (SCCSC) should be applied to any module, component, subroutine or other software entity identified by this analysis. 
5.2.2 Conduct Hazard Risk Assessment Once SCCSCs have been identified, system hazard risk assessment is done to prioritize them. Not all SCCSCs warrant further analysis beyond the architectural design level, nor do all warrant the same depth of analysis. System risk assessment of hazards, as described in the NHB 1700 series of documents, consists of ranking hazards by severity level versus probability of occurrence. This high-severity/high probability hazards are prioritized higher for analysis and corrective action than low-severity/low probability hazards. While  HYPERLINK \l "_5.1.2_Requirements_Criticality" Sections 5.1.2 Requirements Criticality Analysis and  HYPERLINK \l "_5.2.1_Update_Criticality" 5.2.1 Update Criticality Analysis simply assign a Yes or No to whether each component is safety critical, the Risk Assessment process takes this further. Each SCCSCs is prioritized for analysis and corrective action according to the five levels of Hazard Prioritization ranking given previously in  HYPERLINK \l "table22" Table 2-2 Hazard Prioritization - System Risk Index. Determination of the severity and the probability of the hazards is sometimes a source of contention between the safety group and the project. It is best to sit down and work out any disagreements at an early stage. Getting the software development groups buy in on what is truly safety critical is vital. Software developers may ignore what they do not see as important. Getting everybody on one side early prevents the problem of forcing the project to add safety code or testing later in the development cycle. Benefit-to-Cost Rating: HIGH 5.2.3 Analyze Architectural Design Okay, youve got your list of SCCSCs that will be further analyzed. Next you analyze the Architectural (high level) design of those components to ensure all safety requirements are specified correctly and completely. In addition, review the Architectural Design, looking for places and conditions that lead to unacceptable hazards. 
This is done by postulating credible faults/failures and evaluating their effects on the system. Consider the following types of events and failures: input/output timing multiple event outofsequence event failure of event wrong event inappropriate magnitude incorrect polarity adverse environment deadlocking in a multitasking system hardware failures Formal Inspections (see  HYPERLINK \l "_4.2.6_Formal_Inspections" 4.2.5 Formal Inspections of Specifications), design reviews and prototype, animation or simulation augment this process. 5.2.3.1 Design Reviews Design reviews are conducted to verify that the design meets the requirements. Often the reviews are formal (Preliminary Design Review (PDR) and Critical Design Review (CDR)) and for the whole system. Separate software PDRs and CDRs may be held, or the software may be a part of the system review. In all cases, software safety should be addressed. Does the design meet all the applicable software safety requirements? Does the design use best practices to reduce potential errors? Are safety critical modules properly separated from the rest of the software? This is the time to make design changes, if necessary, to fulfill the safety requirements. Applicability matrices, compliance matrices, and compliance checklists are resources which can be used to assist in completing this task. Output products from the reviews are engineering change requests, hazard reports (to capture design decisions affecting hazard controls and verification) and action items. Benefit-to-Cost Rating: HIGH 5.2.3.2 Prototype/Animation/Simulation When questions exist about the ability of the software to meet a requirement, a prototype or simulator is often used to find the answer. This software is quick and dirty and used only for determining if the system can do what it needs to do. Prototypes may also be used to get the customers input into a user interface. Test cases that exercise crucial functions can be developed along with the prototype. 
Just run the tests and observe the system response. If the requirements can not be met, then they must be modified as appropriate. Documented test results can confirm expected behavior or reveal unexpected behavior. Keep in mind, however, that the tests are of a prototype or simulation. The behavior of the real software may differ. Benefit-to-Cost Rating: MEDIUM 5.2.4 Interface Analysis 5.2.4.1 Interdependence Analysis Examine the software design to determine the interdependence among Computer Software Components (CSCs), modules, tables, variables, etc. Elements of software which directly or indirectly influence SCCSCs are also identified as SCCSCs, and as such should be analyzed for their undesired effects. For example, shared memory blocks used by an SCCSC and another CSC (safety critical or not). The inputs and outputs of each SCCSC are inspected and traced to their origin and destination. Benefit-to-Cost Rating: MEDIUM 5.2.4.2 Independence Analysis The safety critical CSCs (SCCSCs) should be independent of non-critical functions. Independence Analysis is a way to verify that.. Those CSCs that are found to affect the output of SCCSCs are designated as SCCSCs themselves. Areas where FCR (Fault Containment Region) integrity is compromised are identified. To perform this analysis, map the safety critical functions to the software modules, and then map the software modules to the hardware hosts and FCRs. All the input and output of each SCCSC should be inspected. Consider global or shared variables, as well as the directly passed parameters. Consider side effects that may be included when a module is run. If a non-critical CSC modifies an SCCSC, either directly, by violation of an FCR or indirectly through shared memory, then it becomes an SCCSC itself. 
Resources used in this analysis are the definition of safety critical functions (MWF and MNWF) that need to be independent (from HYPERLINK \l "_4.2.2.3__Hazardous"  Section 4.2.2.2 Hazardous Commands), design descriptions, and data and flow diagrams. Design changes to achieve valid FCRs and corrections to SCCSC designations may be necessary. At this point, some bottom-up analyses can be performed, like a Software FMEA. Bottom-up analyses identify requirements or design implementations that are inconsistent with, or not addressed by, system requirements. Bottom-up analyses can also reveal unexpected pathways (e.g., sneak circuits) for reaching hazardous or unsafe states. System requirements should be corrected when necessary. Benefit-to-Cost Rating: MEDIUM 5.2.5 Update Timing, Throughput, and Sizing Analysis Now that initial design issues have been decided, review the Timing, Throughput, and Sizing analysis and update it as appropriate. 5.2.6 Update Software Fault Tree Analysis The preliminary software fault tree generated in the requirements phase can now be expanded. Broad functions can now be specified to some module, at least at a high level. In addition, the system is now known to a greater depth. Failures that were credible during the requirements phase may no longer be possible. Additional causes for the top hazard may be added to the tree. 5.2.7 Formal Inspections of Architectural Design Products The process of Formal Inspection begun in previous requirements phase (e.g.  HYPERLINK \l "_5.1.4_Formal_Inspections" Section5.1.4 Formal Inspections) should continue. The preliminary (architectural) design should go through a formal review process. Create new checklists that are appropriate to the design products. Include lessons learned from the requirements phase. Benefit-to-Cost Rating: HIGH 5.2.8 Formal Methods and Model Checking During the architectural design phase, the requirements specified are converted into a high-level design. 
Standard structured methodologies (object oriented or functional) are used to create the preliminary design. If using formal methods (formal specification), the requirements specification is fleshed out with increasing detail. In the example in [40], the preliminary design consisted of the state description (state variables and state transitions) expressed in the formal specification language. The formal method design or model may be the complete architectural design, or it may be created in parallel with a normal design process. If using the parallel approach (standard software development life cycle and formal methods on separate tracks, usually with separate teams), it is important to verify that the designs created by the development team formally match those of the formal methods team. Formal Methods Benefit-to-Cost Rating: LOW Model Checking Benefit-to-Cost Rating: MEDIUM 5.3 Detailed Design Analysis During the Detailed Design phase, the software artifacts (design documents) are greatly enhanced. This additional detail now permits rigorous analyses to be performed. Detailed Design Analyses can make use of artifacts such as the following: detailed design specifications, emulators and PseudoCode Program Description Language products (PDL). Preliminary code produced by code generators within case tools should be evaluated. Many techniques to be used on the final code can be "dry run" on these design products. In fact, it is recommended that all analyses planned on the final code should undergo their first iteration on the code-like products of the detailed design. This will catch many errors before they reach the final code where they are more expensive to correct. The following techniques can be used during this design phase. 
Design Logic Analysis Design Data Analysis Design Interface Analysis Design Constraint Analysis Rate Monotonic Analysis Dynamic Flowgraph Analysis Markov Modeling Measurement of Complexity Selection of Programming Languages Formal Methods and Safety-Critical Considerations Requirements State Machines Formal Inspections Updates to previous analyses Description of each technique is provided below. Choice of techniques in terms of cost versus effectiveness was discussed earlier in  HYPERLINK \l "_3.2.3.3_Tailoring_the" Section 3.2.3.3 Tailoring the Effort. 5.3.1 Design Logic Analysis (DLA) Design Logic Analysis (DLA) evaluates the equations, algorithms, and control logic of the software design. Logic analysis examines the safetycritical areas of a software component. A technique for identifying safetycritical areas is to examine each function performed by the software component. If it responds to, or has the potential to violate one of the safety requirements, it should be considered critical and undergo logic analysis. A technique for performing logic analysis is to analyze design descriptions and logic flows and note discrepancies. The ultimate, fully rigorous DLA uses the application of Formal Methods (FM). Where FM is inappropriate, because of its high cost versus software of low cost or low criticality, simpler DLA can be used. Less formal DLA involves a human inspector reviewing a relatively small quantity of critical software artifacts (e.g. PDL, prototype code) , and manually tracing the logic. Safety critical logic to be inspected can include failure detection/diagnosis, redundancy management, variable alarm limits, and command inhibit logical preconditions. Commercial automatic software source analyzers can be used to augment this activity, but should not be relied upon absolutely since they may suffer from deficiencies and errors, a common concern of COTS tools and COTS in general. 
Benefit-to-Cost Rating: MEDIUM 5.3.2 Design Data Analysis Design data analysis evaluates the description and intended use of each data item in the software design. Data analysis ensures that the structure and intended use of data will not violate a safety requirement. A technique used in performing design data analysis is to compare description to use of each data item in the design logic. Interrupts and their effect on data must receive special attention in safetycritical areas. Analysis should verify that interrupts and interrupt handling routines do not alter critical data items used by other routines. The integrity of each data item should be evaluated with respect to its environment and host. Shared memory and dynamic memory allocation can affect data integrity. Data items should also be protected from being overwritten by unauthorized applications. Considerations of EMI and radiation affects on memory should be reviewed in conjunction with system safety. Benefit-to-Cost Rating: MEDIUM 5.3.3 Design Interface Analysis Design interface analysis verifies the proper design of a software component's interfaces with other components of the system. The interfaces can be with other software components, with hardware, or with human operators. This analysis will verify that the software component's interfaces, especially the control and data linkages, have been properly designed. Interface requirements specifications (which may be part of the requirements or design documents, or a separate document) are the sources against which the interfaces are evaluated. Interface characteristics to be addressed should include interprocess communication methods, data encoding, error checking and synchronization. The analysis should consider the validity and effectiveness of checksums, CRCs, and error correcting code. The sophistication of error checking or correction that is implemented should be appropriate for the predicted bit error rate of the interface. 
An overall system error rate should be defined, and budgeted to each interface. Examples of interface problems: Sender sends eight bit word with bit 7 as parity, but recipient believes bit 0 is parity. Sender transmits updates at 10 Hz, but receiver only updates at 1 Hz. Message used by sender to indicate its current state is not understood by the receiving process. Interface deadlock prevents data transfer (e.g., receiver ignores or cannot recognize Ready To Send). User reads data from wrong address. Data put in shared memory by one process is in big endian order, while the process that will use it is expecting little endian. In a language such as C, where data typing is not strict, sender may use different data types than the receiver expects. (Where there is strong data typing, the compilers will catch this). Benefit-to-Cost Rating: MEDIUM 5.3.4 Design Constraint Analysis Design constraint analysis evaluates restrictions imposed by requirements, the real world and environmental limitations, as well as by the design solution. The design materials should describe all known or anticipated restrictions on a software component. These restrictions may include: update timing, throughput and sizing constraints as per  HYPERLINK \l "_5.1.5_Timing,_Throughput" Section 5.1.5 Timing, Throughput And Sizing Analysis equations and algorithms limitations, input and output data limitations (e.g., range, resolution, accuracy), design solution limitations, sensor/actuator accuracy and calibration noise, EMI digital word length (quantization/roundoff noise/errors) actuator power / energy capability (motors, heaters, pumps, mechanisms, rockets, valves, etc.) 
capability of energy storage devices (e.g., batteries, propellant supplies) human factors, human capabilities and limitations [21] physical time constraints and response times off nominal environments (fail safe response) friction, inertia, backlash in mechanical systems validity of models and control laws versus actual system behavior accommodations for changes of system behavior over time: wear-in, hardware wear-out, end of life performance versus beginning of life performance, degraded system behavior and performance. Design constraint analysis evaluates the ability of the software to operate within these constraints. Benefit-to-Cost Rating: HIGH 5.3.5 Design Functional Analysis This analysis ensures that each safety-critical software requirement is covered and that an appropriate criticality level is assigned to each software element. Tracing the safety requirements throughout the design, code, and tests is vital to making sure that no requirements are lost, that safety is designed in, that extra care is taken during the coding phase, and that all safety requirements are tested. A safety requirement traceability matrix is one way to implement this analysis. Benefit-to-Cost Rating: HIGH 5.3.6 Software Element Analysis Each software element that is not safety critical is examined to assure that it cannot cause a hazard. When examining a software element, consider, at a minimum, the following ideas: Does the element interface with hardware that can cause a hazard? Does the element interface with safety-critical software elements? Can the software element tie up resources required by any safety-critical components? Can the software element enter an infinite loop? Does the software element use the same memory as safety critical data, such that an error in addressing could lead to overwriting the safety critical information? Is priority inversion or deadlocking a possibility, and can it impact a safety critical task? 
Can the software element affect the system performance or timing in a way that would affect a safety critical component? Does the software element call any functions also called by a safety critical component? Can it change any aspect of that function, such that the safety critical component will be affected? Is the software element on the same platform and in the same partition as a safety critical component? 5.3.7 Rate Monotonic Analysis Rate Monotonic Analysis (RMA) is a mathematical method for predicting, a priori, whether a system will meet its timing and throughput requirements when the system is operational. RMA works on systems that use static priority for the tasks. This includes nearly all commercial operating systems. RMA requires that timing information can be measured or reliably estimated for each task. For systems with hard real-time deadlines (deadlines that absolutely must be met), RMA is a valuable tool. For further details on this technique, refer to publications by Sha and Goodenough, References [22] and [25]. A case study using RMA when integrating an intelligent, autonomous software system with Flight software, as part of the NASA New Millennium project, is discussed in reference [47]. Benefit-to-Cost Rating: LOW 5.3.8 Dynamic Flowgraph Analysis Dynamic Flowgraph Analysis is a new technique that is not yet widely used and still in the experimental phase of evaluation. It does appear to offer some promise, building on the benefits of conventional  HYPERLINK \l "_5.1.6_Software_Fault" 5.1.6 Software Fault Tree Analysis (SFTA). The Dynamic Flowgraph Methodology (DFM) is an integrated, methodical approach to modeling and analyzing the behavior of software-driven embedded systems for the purpose of dependability assessment and verification. The methodology has two fundamental goals: 1) to identify how events can occur in a system; and 2) identify an appropriate testing strategy based on an analysis of system functional behavior. 
To achieve these goals, the methodology employs a modeling framework in which models expressing the logic of the system being analyzed are developed in terms of causal relationships between physical variables and temporal characteristics of the execution of software modules. Further description of this method is given in the paper by Garrett, Yau, Guarro and Apostolakais [20]. Benefit-to-Cost Rating: LOW 5.3.9 Markov Modeling Markov Modeling techniques were developed for complex systems and some analysts have adapted these techniques for software intensive systems. They can provide reliability, availability and maintainability data. They model probabilistic behavior of a set of equipment as a continuous time, homogeneous, discrete state Markov Process. The statistical probability of the system being in a particular macro state can be computed. These statistics can be translated into a measure of system reliability and availability for different mission phases. However, attempting to apply these types of reliability modeling techniques to software is questionable because, unlike hardware, software does not exhibit meaningful (random) failure statistics. Also, unlike hardware component failures, software failures are often not independent. Software errors depend on indeterminate human factors, such as alertness or programmer skill level. Benefit-to-Cost Rating: LOW 5.3.10 Measurement of Complexity The complexity of the software should be evaluated, because the level of complexity can affect the understandability, reliability and maintainability of the code. Highly complex data and command structures are difficult, if not impossible, to test thoroughly. Complex software is difficult to maintain, and updating the software may lead to additional errors. Not all paths can usually be thought out or tested for and this leaves the potential for the software to perform in an unexpected manner. 
When highly complex data and command structures are necessary, look at techniques for avoiding a high level of programming interweaving. Linguistic and structural metrics exist for measuring the complexity of software, and are discussed below. The following references provide a more detailed discussion of and guidance on the techniques. Software State of the Art: selected papers, Tom DeMarco, Dorset House, NY, 2000. Black-Box Testing : Techniques for Functional Testing of Software and Systems, Boris Beizer, Wiley, John & Sons Inc., 1995 Applying Software Metrics, Shari Lawrence Pfleeger and Paul Oman, IEEE Press, 1997 A Framework of Software Measurement, Horst Zuse, Walter deGruyter, 1998 Metrics and Models in Software Quality Engineering, Stephen Kan, Addison Wesley, 1995 Object-Oriented Metrics Measures of Complexity, Brian Henderson-Sellers, Prentice Hall, 1996 Software Metrics : A Rigorous and Practical Approach, Norman E. Fenton, PWS Publishing, 1998 Function Point Analysis: Measurement Practices for Successful Software Projects, David Garmus and David Herron, Addison, 2000 Linguistic measurements assess some property of the text without regard for the contents (e.g., lines of code, function points, number of statements, number and type of operators, total number and type of tokens, etc). Halstead's Metrics is a well known measure of several of these arguments. Structural metrics focuses on control-flow and data-flow within the software and can usually be mapped into a graphics representation. Structural relationships such as the number of links and/or calls, number of nodes, nesting depth, etc. are examined to get a measure of complexity. McCabe's Cyclomatic Complexity metric is the most well known and used metric for this type of complexity evaluation. Apply one or more complexity estimation techniques, such as McCabe or Halstead, to the design products. If an automated tool is available, the software design and/or code can be run through the tool. 
If there is no automated tool available, examine the critical areas of the detailed design and any preliminary code for areas of deep nesting, large numbers of parameters to be passed, intense and numerous communication paths, etc. The references above give detailed instructions on what to look for when estimating complexity. Resources used by these techniques are the detailed design, high level language description, any source code or pseudocode, and automated complexity measurement tool(s). The output products are the complexity metrics, predicted error estimates, and areas of high complexity identified for further analysis or consideration for simplification. Several automated tools are available on the market which provide these metrics. The level and type of complexity can indicate areas where further analysis, or testing, may be warranted. Do not take the numbers at face value, however! Sometimes a structure considered to be highly complex (such as a case statement) may actually be a simpler, more straight forward method of programming and maintenance, thus decreasing the risk of errors. Benefit-to-Cost Rating: HIGH 5.3.10.1 Function Points The most common size metric used is software lines of code (SLOC). While easy to measure, this metric has some problems. The lines of code it takes to produce a specific function will vary with the language more lines are needed in assembly language than in C++, for instance. Counting the lines can only be done once the code is available, and pre-coding estimates are often not accurate. When SLOC is used to calculate other measures, such as defect rates (number of defects per SLOC), the numbers get worse as the programming improves! Function Points are an alternative measurement to SLOC that focuses on the end-user, and not on the technical details of the coding. Function Point Analysis was developed by Allan Albrecht of IBM in 1979, and revised in 1983. 
The FPA technique quantifies the functions contained within software in terms which are meaningful to the software users. The measure relates directly to the requirements which the software is intended to address. It can therefore be readily applied throughout the life of a development project, from early requirements definition to full operational use. The function point metric is calculated by using a weighted count of the number of the following elements: User inputs provide application-oriented data to the software. User outputs provide application-oriented information to the user. This includes reports, screens, error messages, etc. Individual data items within a report are not counted separately. User inquiries are an on-line input that results in the generation of some immediate software response in the form of an on-line output. Typing a question in a search engine would be an inquiry. Files include both physical and logical files (groupings of information). External interfaces are all machine readable interfaces used to transmit information to another system. The weighting factors are based on the complexity of the software. Function Point Analysis: Measurement Practices for Successful Software Projects, by David Garmus and David Herron, provides information on calculating and using function points. The International Function Point Users Group (IFPUG,  HYPERLINK "http://www.ifpug.org/" http://www.ifpug.org/) supports and promotes the use of function points. 5.3.10.2 Function Point extensions Function points (described above in  HYPERLINK \l "_5.3.8.1_Function_Points" 5.3.10.1 Function Points) are business (database, transaction) oriented. Extensions are needed for systems and engineering software applications, such as real-time, process control, and embedded software. Feature points are one such extension. This metric takes into account algorithmic complexity. 
A feature point value is the sum of the weighted function point factors and the weighted algorithm count. Algorithms include such actions as inverting a matrix, decoding a bit string, or handling an interrupt. Feature points were developed in 1986 by Capers Jones. The 3D function point was developed by Boeing for real-time and embedded systems. The Boeing approach integrates the data, functional, and control dimensions of a software system. The data dimension is essentially the standard function point. The functional dimension counts the transformations, which are the number of internal operations to transform the input data into output data. The control dimension is measured by counting the number of transitions between states. 5.3.11 Selection of Programming Languages When choosing a programming language, many factors are important. The memory size and execution speed of an algorithm developed in a particular language is one factor. The existence of tools (compiler, integrated development environment, etc.) that support the language for the specified processor and on the development platform, and the availability of software engineers who have training and experience with the language are also important. When developing safety critical applications or modules, however, the safeness of the programming language should be a high priority factor. A safe programming language is one in which the translation from source to object code can be rigorously verified. Compilers that are designed to use safe subsets of a programming language are often certified, guaranteeing that the object code is a correct translation of the source code. In a more general sense, a safe language is one that enforces good programming practices, and that finds errors at compile time, rather than at run time. Safe languages have strict data types, bounds checking on arrays, and discourage the use of pointers, among other features.  
HYPERLINK \l "_6._Programming_Languages_1" Section 6 SOFTWARE DEVELOPMENT ISSUES  contains a technical overview of safety-critical coding practices for developers and safety engineers and detailed discussion of specific programming languages. Many of the coding practices involve restricting the use of certain programming language constructs. Reading knowledge of a high level language (structural or object-oriented) is required to understand the concepts that are being discussed. Section 6 will provide an introduction on the criteria for evaluating the risks associated with the choice of a particular programming language. Some are well suited for safety-critical applications, and therefore engender a lower risk. Others are less safe and, if chosen, require additional analysis and testing to assure the safety of the software. Where appropriate, safe subsets of languages will be described. Common errors (bugs) in software development are also included. When choosing a language, consider the language environment (compiler, Integrated Development Environment (IDE), debugger, etc.) as well. Is the compiler certified? Is there a list of known defects or errors produced by the compiler? Does the code editor help find problems by highlighting or changing the color of language-specific terms? Does the compiler allow you to have warnings issued as errors, to enforce conformance? Is there a debugger that allows you to set break points and look at the source assembly code? No programming language is guaranteed to produce safe software. The best languages enforce good programming practices, make bugs easier for the compiler to find, and incorporate elements that make the software easier to verify. Even so, the safeness and reliability of the software depend on many other factors, including the correctness of the requirements and the proper implementation of the requirements by the design. 
Humans are involved in all aspects of the process, and we are quite capable of subverting even the safest of languages. Select a language based on a balance of all factors, including safety. Benefit-to-Cost Rating: HIGH 5.3.12 Formal Methods and Model Checking During the detailed design phase, the architectural (preliminary) design is filled in with all the necessary details to fully specify the system. Standard structured methodologies (object oriented or functional) are used to create the detailed design. If using formal methods (formal specification), the next level of detail is added to the system specification. In the example in [40], the detailed design added more information about the system, all expressed in the formal specification language. The formal method design or model may be the detailed design, or it may be created in parallel with a normal design process. If using the parallel approach (standard software development life cycle and formal methods on separate tracks, usually with separate teams), it is important to verify that the designs created by the development team formally match those of the formal methods team. Formal Methods Benefit-to-Cost Rating: LOW Model Checking Benefit-to-Cost Rating: MEDIUM 5.3.13 Requirements State Machines Requirements State Machines (RSM) are sometimes called Finite State Machines (FSM). An RSM is a model or depiction of a system or subsystem, showing states and the transitions between the states. Its goal is to identify and describe ALL possible states and their transitions. RSM analysis can be used on its own, or as a part of a structured design environment, (e.g., Object Oriented Design (see  HYPERLINK \l "_4.2.3.1_Object_Oriented" Section 4.3.2.1) and Formal Methods (See  HYPERLINK \l "_4.2.4_Formal_Methods" Section 4.2.3 Formal Methods - Specification Development)). 
Whether or not Formal Methods are used to develop a system, a high level RSM can be used to provide a view into the architecture of an implementation without being engulfed by all the accompanying detail. Semantic analysis criteria can be applied to this representation and to lower level models to verify the behavior of the RSM and determine that its behavior is acceptable. Details on using Requirements State Machines are given in  HYPERLINK \l "_APPENDIX_D" Appendix D. Benefit-to-Cost Rating: LOW 5.3.14 Formal Inspections of Detailed Design Products The new software artifacts (detailed design) should be formally inspected. Update the inspection checklist with lessons learned, and add items appropriate to the detail now available. Pseudo-code or prototype code are often available for review at this stage. Benefit-to-Cost Rating: HIGH 5.3.15 Software Failure Modes and Effects Analysis A bottom up analysis technique is the FMEA (Failure Modes and Effects Analysis). It looks at how each component could fail, how the failure propagates through the system, and whether it can lead to a hazard. This technique requires a fairly detailed design of the system. In the Architectural Design phase, only a preliminary Software FMEA can be completed. A Software FMEA uses the methods of a standard (hardware) FMEA, substituting software components for hardware components in each case. A widely used FMEA procedure is MIL-STD-1629, which is based on the following steps:  Define the system to be analyzed. Construct functional block diagrams. Identify all potential item and interface failure modes. Evaluate each failure mode in terms of the worst potential consequences. Identify failure detection methods and compensating provisions. Identify corrective design or other actions to eliminate / control failure. Identify impacts of the corrective change. Document the analysis and summarize the problems which could not be corrected. 
More detailed information on SFMEA (Software Failure Modes and Effects Analysis) can be found in HYPERLINK \l "_APPENDIX_C_Software"Appendix C. Benefit-to-Cost Rating: MEDIUM 5.3.16 Updates to Previous Analyses Now that the detailed design is completed, the SFTA and Timing/Throughput/Sizing analyses should be updated. Enough detail exists to complete the Software Fault Tree Analysis, though it should be reviewed in the later phases, since things often change during coding. The Criticality Analysis should be updated. Modules may have been subdivided into smaller components or rearranged within the design. The SCCSCs (Safety Critical Computer Software Component) should be reviewed for changes. This is also the time to look for additional modules that should be classified as safety critical. 5.4 Code Analysis Code analysis verifies that the coded program correctly implements the verified design and does not violate safety requirements. Having the code permits real measurements of size, complexity and resource usage of the software. These quantities could only be estimated during the design phase, and the estimates were often just educated guesses. The results of code analyses may lead to significant redesign if the analyses show that the guesses were wildly incorrect. However, the main purpose is to verify that the code meets the requirements (traceable through the design) and that it produces a safe system. Code Analyses include the following: Code Logic Analysis Code Data Analysis Code Interface Analysis Measurement of Complexity Code Constraint Analysis Formal Code Inspections, Checklists, and Coding Standards Formal Methods Unused Code Analysis Interrupt Analysis Update to Timing/Throughput/Sizing analysis Update to Software Failure Modes and Effects Analysis Some of these code analysis techniques mirror those used in detailed design analysis. 
However, the results of the analysis techniques might be significantly different than during earlier development phases, because the final code may differ substantially from what was expected or predicted. Many of these analyses will be undergoing their second iteration, since they were applied previously to the code-like products (PDL) of the detailed design. There are some commercial tools available which perform one or more of these analyses in a single package. These tools can be evaluated for their validity in performing these tasks, such as logic analyzers, and path analyzers. However, unvalidated COTS tools, in themselves, cannot generally be considered valid methods for formal safety analysis. COTS tools are often useful to reveal previously unknown defects. Note that the definitive formal code analysis is that performed on the final version of the code. A great deal of the code analysis is done on earlier versions of code, but a complete check on the final version is essential. For safety purposes it is desirable that the final version have no instrumentation (i.e., extra code) added to detect problems, such as erroneous jumps. The code may need to be run on an instruction set emulator which can monitor the code from the outside, without adding the instrumentation, if such problems are suspected. 5.4.1 Code Logic Analysis Code logic analysis evaluates the sequence of operations represented by the coded program. Code logic analysis will detect logic errors in the coded software. This analysis is conducted by performing logic reconstruction, equation reconstruction and memory decoding. For complex software, this analysis is applied to all safety critical components (SCCSCs). Other software components may be analyzed if they are deemed important to the system functionality. Logic reconstruction entails the preparation of flow charts from the code and comparing them to the design material descriptions and flow charts. 
Equation reconstruction is accomplished by comparing the equations in the code to the ones provided with the design materials. Memory decoding identifies critical instruction sequences even when they may be disguised as data. The analyst should determine whether each instruction is valid and if the conditions under which it can be executed are valid. Memory decoding should be done on the final un-instrumented code. Benefit-to-Cost Rating: LOW 5.4.2 Code Data Analysis Code data analysis concentrates on data structure and usage in the coded software. Data analysis focuses on how data items are defined and organized. Ensuring that these data items are defined and used properly is the objective of code data analysis. This is accomplished by comparing the usage and value of all data items in the code with the descriptions provided in the design materials. Of particular concern to safety is ensuring the integrity of safety critical data against being inadvertently altered or overwritten. For example, check to see if interrupt processing is interfering with safety critical data. Also, check the typing of safety critical declared variables. Benefit-to-Cost Rating: MEDIUM 5.4.3 Code Interface Analysis Code interface analysis verifies the compatibility of internal and external interfaces of a software component. A software component is composed of a number of code segments working together to perform required tasks. These code segments must communicate with each other, with hardware, other software components, and human operators to accomplish their tasks. Check that parameters are properly passed across interfaces. Each of these interfaces is a source of potential problems. Code interface analysis is intended to verify that the interfaces have been implemented properly. Hardware and human operator interfaces should be included in the Design Constraint Analysis discussed in  HYPERLINK \l "_5.4.5_Update_Design" Section 5.4.5 Update Design Constraint Analysis. 
Benefit-to-Cost Rating: MEDIUM 5.4.4 Update Measurement of Complexity Now that code exists, the complexity metrics can be recalculated. Complex code should be evaluated by a human. Some logic structures (such as case statements) may be flagged as complicated, when they really improve the comprehensibility of the software. Complex software increases the number of errors, while making it difficult to find them. This makes the software more likely to be unstable, or suffer from unpredictable behavior. Reducing complexity is generally a good idea, whenever possible. Modularity is a useful technique to reduce complexity. Encapsulation can also be used, to hide data and functions from the user (the rest of the software), and prevent their unanticipated execution. Software flagged as complex should be analyzed in more depth, even if it is not safety critical. These modules are prime candidates for formal inspections and the logic/data/constraint analyses. 5.4.5 Update Design Constraint Analysis The criteria for design constraint analysis applied to the detailed design in  HYPERLINK \l "_5.3.4_Design_Constraint" Section 5.3.4 Design Constraint Analysis, can be updated using the final code. At the code phase, real testing can be performed to characterize the actual software behavior and performance in addition to analysis. The physical limitations of the processing hardware platform should be addressed. Timing, sizing and throughput analyses should also be repeated as part of this process (see  HYPERLINK \l "_5.4.9__Final" section 5.4.10) to ensure that computing resources and memory available are adequate for safety critical functions and processes. Underflows/overflows in certain languages (e.g., Ada) give rise to exceptions or error messages generated by the software. 
These conditions should be eliminated by design if possible; if they cannot be precluded, then error handling routines in the application must provide appropriate responses, such as automatic recovery, querying the user (retry, etc.), or putting the system into a safe state. 5.4.6 Formal Code Inspections, Checklists, and Coding Standards Formal Inspections, introduced in  HYPERLINK \l "_4.2.6_Formal_Inspections" Section 4.2.5 Formal Inspections, should be performed on the safety critical software components, at a minimum. Consider doing Formal Inspections on other complex or critical software modules. Formal Inspections are one of the best methodologies available to evaluate the quality of code modules and program sets. Having multiple eyes and minds review the code, in a formal way, makes errors and omissions easier to find. Checklists should be developed for use during formal inspections to facilitate inspection of the code. They should include: requirements information for modules under review design details for modules under review coding standards (subset/most important) language-independent programming errors language-specific programming errors HYPERLINK \l "_E.4_Checklist_of_1"Appendix E contains a sample checklists of common errors, both independent of language and language specific. Coding standards are based on style guides and safe subsets of programming languages. They should have been specified during the design phase (or earlier), and used throughout the coding (implementation) phase. Benefit-to-Cost Rating: HIGH 5.4.7 Applying Formal Methods to Code Generation of code is the ultimate output of Formal Methods. In a "pure" Formal Methods system, analysis of code is not required. In practice, however, attempts are often made to "apply" Formal Methods to existing code after the fact. 
In this case the analysis techniques of the previous sections may be used to "extract" the logic of the code, and then compare the logic to the formal requirements expressions from the Formal Methods. Formal Methods Benefit-to-Cost Rating: LOW Model Checking Benefit-to-Cost Rating: LOW 5.4.8 Unused Code Analysis A common real world coding error is generation of code which is logically excluded from execution; that is, preconditions for the execution of this code will never be satisfied. Such code is undesirable for three reasons; a) it is potentially symptomatic of a major error in implementing the software design; b) it introduces unnecessary complexity and occupies memory or mass storage which is often a limited resource; and c) the unused code might contain routines which would be hazardous if they were inadvertently executed (e.g., by a hardware failure or by a Single Event Upset. SEU is a state transition caused by a high speed subatomic particle passing through a semiconductor - common in nuclear or space environments). There is no particular analysis technique for identifying unused code; however, unused code is often identified during the course of performing other types of code analysis. Unused code can be found during unit testing with COTS coverage analyzer tools. Care should be taken to ensure that every part of the code is eventually exercised (tested) at some time, within all possible operating modes of the system. Benefit-to-Cost Rating: MEDIUM 5.4.9 Interrupt Analysis Interrupt Analysis looks at how interrupts are used by the software. The effect of interrupts on program flow and data corruption are the primary focus of this analysis. Can interrupts lead to priority inversion and prevent a high priority or safety critical task from completing? If interrupts are locked out for a period of time, can the system stack incoming interrupts to prevent their loss? Can a low-priority process interrupt a high-priority process and change critical data? 
When performing interrupt analysis, consider the following areas of the code: Program segments/modules where interrupts are inhibited (locked out). Look at how long the interrupts are inhibited and whether the system can buffer interrupts for this period of time. The expected and maximum interrupt rates would be needed to check for buffering capacity. Identify impacts from lost interrupts. Look for possible infinite loops. Re-entrant code. Re-entrant code is designed to be interrupted without loss of state information. Check that re-entrant modules have sufficient data saved for each interruption, and that the data and system state are correctly restored. Make sure that modules that need to be re-entrant are implemented as such. Interruptible code segments/modules. Make sure that timing-critical areas are protected from interrupts, if a delay would be unacceptable. Check for sequences of instructions that should not be interrupted. Priorities. Look over the process priorities of the real-time tasks. Verify that time-critical events will be assured of execution. Also consider the operator interface. Will the interface update with important or critical information in a timely fashion? Undefined interrupts. What happens when an undefined interrupt is received? Is it ignored? Is any error processing required? Benefit-to-Cost Rating: HIGH 5.4.10 Final Timing, Throughput, and Sizing Analysis With the completion of the coding phase, the timing, throughput, and sizing parameters can be measured. The size of the executable module (storage size) is easily measured, as is the amount of memory space used by the running software. Special tests may need to be run to determine the maximum memory used, as well as timing and throughput parameters. Some of these tests may be delayed until the testing phase, where they may be formally included in functional or load/stress tests. 
However, simple tests should be run as soon as the appropriate code is stable, to allow verification of the timing, throughput, and sizing requirements. The earlier a problem is discovered, the easier and cheaper it is to fix. Benefit-to-Cost Rating: HIGH 5.4.11 Program Slicing When you get a wrong answer from your software, program slicing can help. It is a technique to trace back through the program and show you all, and only, the statements that affect the variable you are interested in. In a large, complex program, slicing can cut through the extraneous (to the problem) information, focusing in on the statements of interest. Slicing has been mainly used in debugging (finding the source of an error) and reengineering (pulling out part of a program to create a new program). It can also be used to check the lineage of any safety-critical data. Using a slicing tool to pull out all the statements that affect the safety-critical variable, and then examining the results, may point to errors or unexpected interactions with other, non-critical data. You may even wish to do a Formal Inspection on the sliced code. Slicing comes in two forms: static and dynamic. Static slicing, introduced in 1982, is done on the source code (compile-time). Originally, it had to be an executable subset of the program, though that is not always necessary. Static slicing shows every statement that may have an impact on the variable of interest. Dynamic slicing first appeared around 1988, and works on programs as they operate (run-time). While static slicing shows all the statements that may affect the variable of interest, dynamic slicing shows only those that do affect the variable as the software is exercised. Program slicing by hand would be a tedious job. Tools are beginning to be available for a variety of languages. Benefit-to-Cost Rating: MEDIUM 5.4.12 Update Software Failure Modes and Effects Analysis Review any changes to the design that developed during the coding phase. 
Often creating the actual coding will point out problems with the design, or elements that are missing. If the design was modified during this phase, review the Software FMEA and make any updates as necessary. 5.5 Test Analysis Two sets of analyses should be performed during the testing phase: analyses before the fact to ensure validity and completeness of tests analyses of the test results Testing (as opposed to analyses) was discussed in  HYPERLINK \l "_4.6__Software" Section 4.6 Software Integration and Test and will not be covered here. Analysis before the fact should, as a minimum, consider test coverage for safety critical Must-Work-Functions and Must-Not-Work-Functions. 5.5.1 Test Coverage For small pieces of code it is sometimes possible to achieve 100% test coverage (i.e., to exercise every possible state and path of the code). However, it is often not possible to achieve 100 % test coverage due to the enormous number of permutations of states in a computer program execution, versus the time it would take to exercise all those possible states. Also there is often a large indeterminate number of environmental variables, too many to completely simulate. Some analysis is advisable to assess the optimum test coverage as part of the test planning process. There is a body of theory which attempts to calculate the probability that a system with a certain failure probability will pass a given number of tests. This is discussed in "Evaluation of Safety Critical Software", David L. Parnas, A. John van Schouwen and Shu Po Kwan, Communications of ACM, June 1990 Vol 33 Nr 6 [43]. White box testing assumes that the tester has knowledge of the internal workings of the module or program to be tested. It is usually used with unit (module, file, etc.) testing. Examples of white box tests are path tests (all paths through the code), branch testing (verify each branch taken), checking each assignment to memory, and verifying that each statement is executed at least once. 
Black box testing assumes that the tester has no knowledge of what happens inside the software. Only the inputs and outputs are accessible. Black box tests usually are functional tests that exercise the normal operations of the software. In addition, off nominal tests are done to verify the software operates correctly with erroneous input. Statistical methods such as Monte Carlo simulations can be useful in planning "worst case" credible scenarios to be tested. Test coverage analysis is best if done prior to the start of testing. At a minimum, analysis should be done to verify that the planned tests cover all paths through the program, that all branches are exercised, and that each statement is executed at least once. Verify that boundary conditions are tested for all inputs, as well as nominal and erroneous input values. Benefit-to-Cost Rating: HIGH 5.5.2 Formal Inspections of Test Plan and Procedures Test plans should be created early in the software development lifecycle. Once the requirements are known, a test plan that addresses how the requirements will be verified can be developed. Functional testing, acceptance testing, and off-nominal testing should be included, at a minimum. Test procedures are the specifics of what is being tested, how to conduct the test, and what the expected results are. The procedures should reference the specific requirements verified by the test. Places to check off the steps should be provided. Important sections, including safety verification steps, have signature blocks for witnesses. The test plan and test procedures should be reviewed by the safety engineer for a project at the safety minimum level. For higher safety levels, the plan and procedures should undergo formal inspections. (Formal inspections are discussed in  HYPERLINK \l "_4.2.6_Formal_Inspections" Section 4.2.5 Formal Inspections.) Benefit-to-Cost Rating: HIGH 5.5.3 Reliability Modeling Software reliability contributes to software safety. 
If the software operates for a long period of time without a failure, then it will be safe for that period of time, assuming that an operational (non-failed) mode cannot lead to a hazard. According to the ANSI standard, software reliability is defined as the probability of failure-free operation of a computer program for a specified time in a specified environment. Reliability modeling is the process of determining what that probability and the specified time are. The primary goal of software reliability modeling is to answer the question: Given a system, what is the probability that it will fail in a given time interval, or, what is the expected duration between successive failures? Software reliability models come in several flavors. Prediction models attempt to predict what the reliability of the system will be when it is completed. Prediction models may be developed as early as the requirements phase, or in the design or implementation phase. The questions that a predictive model tries to answer are: Can we reach the reliability goals or requirements? How reliable will the system truly be? Resources that prediction models may use are the failure data of the current system (if it is in test), metrics from the software development process, and failure data from similar systems. Estimation models evaluate the current software, usually during the test phase. Based on defects found and other factors, the models attempt to estimate how many defects still remain or the time between failures, once the software is in operation. Estimation models include reliability growth models, input domain models, and fault seeding models. Over 40 varieties of prediction and estimation software reliability models exist. The accuracy of the models varies with the model, the project, and the expertise of the analyst. Benefit-to-Cost Rating: LOW 5.5.3.1 Criteria for Selecting a Reliability Model Model validity. How good is the model at accurately measuring the current failure rate? 
At predicting the time to finish testing with associated date and cost? At predicting the operational failure rate? Ease of measuring parameters. What are the cost and schedule impacts for data (metrics) collection? How physically significant are the parameters to the software development process? Quality of assumptions. How close are the model assumptions to the real world? Is the model adaptable to a special environment? Applicability. Can the model handle program evolution and change in test and operational environments? Simplicity. Is the model simple in concept, data collection, program implementation, and validation? Insensitivity to noise. Is the model insensitive to insignificant changes in input data and parameters, without losing responsiveness to significant differences? Usefulness. Does the model estimate quantities that are useful to project personnel? 5.5.3.2 Issues and Concerns Ideally, one simple reliability model would be available, with great tool support, that would easily and accurately predict or estimate the reliability of the software under development. The current situation, however, is that Over 40 models have been published in the literature. The accuracy of the models is variable. You can't know ahead of time which model is best for your situation. Some aspects of the models that are a cause for concern are: How accurate is the data collected during testing? How easy is it to collect that data? Models are primarily used during the testing phase, which is late in the development cycle. Estimation of parameters is not always possible, and sometimes it is mathematically intractable. Reliable models for multiple systems have not been developed. There are no well-established criteria for model selection. 5.5.3.3 Tools In the last decade, tools have become available to aid in software reliability modeling. Most of the established models have tools that support them. 
Resources for information on available tools are: Applying Software Reliability Engineering in the 1990s, W. Everett, S. Keene, and A. Nikora, IEEE Transaction on Reliability, Vol. 47, No. 3-SP, September 1998 Software Reliability Engineering Study of a Large-Scale Telecommunications Software System, Carman et al., Proc. 1995 International Symposium on Software Reliability Engineering, Toulouse, France, Oct. 1995, pp. 350-.  HYPERLINK "http://rac.iitri.org/DATA/RMST/rel_model.html" http://rac.iitri.org/DATA/RMST/rel_model.html Links to many tools MEADEP tool.  HYPERLINK "http://www.meadep.com/" http://www.meadep.com/ Reliability Modeling, Developed by C. Chay and W. Leyu,  HYPERLINK "http://www.icaen.uiowa.edu/~ankusiak/reli.html" http://www.icaen.uiowa.edu/~ankusiak/reli.html 5.5.3.4 Dissenting Views Not everyone agrees that software reliability modeling is a useful technique. Some are concerned about the applicability of the models to real-world situations. Most models assume random failures, but is that true? The models do not usually address the fact that fixing a failure may add other errors to the software. The fact that software is often unique (one-of-a-kind) makes statistics about the error rates difficult to apply across a broad spectrum of programs. Unlike hardware, you are dealing with one part, not one of many identical units. A critical critique of software reliability modeling is found in [46]. The authors assert that current models do not adequately deal with these factors: Difficulties in estimating Operational Profiles, such as the input distribution (what is input, when, in what order). New software may have no history or customer base to use to determine typical operations. It is non-trivial to determine how the system will be used, but such an operational profile is a key element for most reliability models. Problems with reliability estimation. 
Inadequate test sets, failure to exercise each feature in testing, and skewed operational profile (critical functions may not be part of the typical profile) make reliability difficult to estimate accurately. Reliability estimation occurs near the end of development. Individual component reliability is not known, just for the full system. There is no information to feed back that may lead to process improvement and better reliability in future projects. Saturation effects lead to reliability overestimation. Most testing techniques reach a saturation point past which they are unable to find defects. These limits can lead to an overestimate of the software reliability. 5.5.3.5 Resources The following papers and websites provide useful information on software reliability modeling: Software Reliability Assurance Handbook,  HYPERLINK "http://www.cs.colostate.edu/~cs630/rh/" http://www.cs.colostate.edu/~cs630/rh/ Software Reliability Modeling Techniques and Tools, Michael R. Lyu and Allen P. Nikora, ISSRE93 Tutorial, November, 1993  HYPERLINK "http://techreports.jpl.nasa.gov/1993/93-1886.pdf" http://techreports.jpl.nasa.gov/1993/93-1886.pdf Software Reliability: To Use or Not To Use?, a panel discussion chaired by Michael Lyu,  HYPERLINK "http://www.stsc.hill.af.mil/crossTalk/1995/feb/Reliable.asp" http://www.stsc.hill.af.mil/crossTalk/1995/feb/Reliable.asp Applying Software Reliability Engineering in the 1990s, W. Everett, S. Keene, and A. Nikora, IEEE Transaction on Reliability, Vol. 47, No. 3-SP, September 1998 Software Reliability: Assumptions, Realities and Data, Michel Defamie, Patrick Jacobs, and Jacques Thollembeck, Proceedings of the IEEE International Conference on Software Maintenance, 1998 Software Reliability Engineering Study of a Large-Scale Telecommunications Software System, Carman et. al., Proc. 1995 International Symposium on Software Reliability Engineering, Toulouse, France, Oct. 1995, pp. 350-. 
Predicting Software Reliability, Alan Wood, IEEE Computer, Vol. 29, No. 11, November 1996 Software Metrics and Reliability, Dr. Linda Rosenberg, Ted Hammer, and Jack Shaw,  HYPERLINK "http://satc.gsfc.nasa.gov/suport/software_metrics_and_reliability.html" http://satc.gsfc.nasa.gov/support/ISSRE_NOV98/software_metrics_and_reliability.html Reliability Modeling for Safety-Critical Software, Norman F. Schneidewind, IEEE Transactions on Reliability, Vol. 46, No.1, March 1997, pp. 88-98 Handbook of Software Reliability Engineering (Book), Edited by Michael R. Lyu, Published by IEEE Computer Society Press and McGraw-Hill Book Company,  HYPERLINK "http://www.cse.cuhk.edu.hk/~lyu/book/reliability/" http://www.cse.cuhk.edu.hk/~lyu/book/reliability/ 5.5.4 Checklists of Tests The software development group (or the safety engineer) should create a list of all tests that will be done on the software. HYPERLINK \l "_4.6__Software"Section 4.6 Software Integration and Test discusses the different variety of tests that can be conducted. The test checklist should be maintained by both the development and safety personnel. That provides a cross-check, to make sure no tests are accidentally missed. Benefit-to-Cost Rating: HIGH 5.5.5 Test Results Analysis Once tests are conducted (and witnessed), a test report is written that describes what was done and how the results match (or differ from) the expected results. The safety engineer uses these test reports, and problem/resolution reports, to verify that all safety requirements have been satisfied. The test results analysis also verifies that all identified hazards have been eliminated or controlled to an acceptable level of risk. The results of the test safety analysis are provided to the ongoing system safety analysis activity. All test discrepancies of safety critical software should be evaluated and corrected in an appropriate manner. 
Benefit-to-Cost Rating: HIGH 5.5.6 Independent Verification and Validation For high value systems with high risk software, an IV&V organization is usually involved to oversee the software development. Verification & Validation (V&V) is a system engineering process employing a variety of software engineering methods, techniques, and tools for evaluating the correctness and quality of a software product throughout its life cycle. IV&V is performed by an organization that is technically, managerially, and financially independent of the development organization. IV&V should supplement, not supercede, the in-house software quality/product assurance efforts. Software QA and safety should still be involved with the project from the start, reviewing documents, offering advice and suggestions, and monitoring the software development process. Depending on what is negotiated with the project, the IV&V personnel may be a second set of eyes, shadowing the software QA, conducting independent audits, witnessing testing, etc. This requires the IV&V person to be stationed with the project, or to visit frequently. A more remote form of IV&V involves reviewing the software products (plans, designs, code, test results, code review reports, etc.), with a few in-person audits to verify the software development process. The IV&V organization should fully participate in the validation of test analyses and traceability back to the requirements. Benefit-to-Cost Rating: MEDIUM 5.5.7 Resources  HYPERLINK "http://www.chillarege.com/authwork/TestingBestPractice.pdf" http://www.chillarege.com/authwork/TestingBestPractice.pdf provides information on the testing, and test development, process. Software Testing Hotlist, Resources for Professional Software Testers,  HYPERLINK "http://www.io.com/~wazmo/qa/" http://www.io.com/~wazmo/qa/, is a very good reference site for testing information. 
The Software QA and Testing Resource Center,  HYPERLINK "http://www.sqatest.com"   HYPERLINK "http://www.softwareqatest.com/" http://www.softwareqatest.com/, also provides useful information on testing and the QA process. 5.6 Operations & Maintenance Maintenance of software is described in  HYPERLINK \l "_4.8_Software_Operations" Section 4.8 Software Operations & Maintenance. During the operational phase of a safety critical software set, rigorous configuration control must be enforced. For every proposed software change, it is necessary to repeat each development and analysis task performed during the life cycle steps previously used for each modification, from requirements (re-)development through code (re-)test. The safety analyst must ensure that proposed changes do not disrupt or compromise pre-established hazard controls. It is advisable to perform the final verification testing on an identical off-line analog (or simulator) of the operational software system, prior to placing it into service. 6. SOFTWARE DEVELOPMENT ISSUES In this chapter, we'll look at various programming languages, operating systems, tools, and development environments being used in safety critical software. Also included are various new technologies that have particular (and usually unsolved) problems with determining their safety. Finally, a sampling of good programming practices specific to safety issues is presented. Choosing a programming language is a necessity for any project. This section examines a subset of the available languages, as there are over a hundred languages. The languages considered are those that are commonly used in safety critical or embedded environments. Also included are languages that might be considered because they are popular or new. For each language, any safety-related strengths are discussed, and guidance is given on what aspects to avoid. Safer software can be written in any language. 
Coding standards can designate how to program in a particular language to produce safer code. But we're human. We make mistakes, we get in a hurry, and the coding standards may not be followed. Languages that are safer are those that enforce the standards, that check for common errors, and that do so as early as possible! This chapter will also look at the environment the software will be developed and run in. Issues with compilers, tools, Integrated Development Environments (IDEs), automatic code generation, and operating systems (especially Real-Time (RTOS)) will be considered. Why does software have bugs? 1 miscommunication or no communication - as to specifics of what an application should or shouldn't do (the application's requirements). software complexity - the complexity of current software applications can be difficult to comprehend for anyone without experience in modern-day software development. Windows-type interfaces, client-server and distributed applications, data communications, enormous relational databases, and sheer size of applications have all contributed to the exponential growth in software/system complexity. And the use of object-oriented techniques can complicate instead of simplify a project unless it is well-engineered. programming errors - programmers, like anyone else, can make mistakes. changing requirements - the customer may not understand the effects of changes, or may understand and request them anyway - redesign, rescheduling of engineers, effects on other projects, work already completed that may have to be redone or thrown out, hardware requirements that may be affected, etc. If there are many minor changes or any major changes, known and unknown dependencies among parts of the project are likely to interact and cause problems, and the complexity of keeping track of changes may result in errors. Enthusiasm of engineering staff may be affected. 
In some fast-changing business environments, continuously modified requirements may be a fact of life. In this case, management must understand the resulting risks, and QA and test engineers must adapt and plan for continuous extensive testing to keep the inevitable bugs from running out of control - see 'What can be done if requirements are changing continuously?' in Part 2 of the FAQ. time pressures - scheduling of software projects is difficult at best, often requiring a lot of guesswork. When deadlines loom and the crunch comes, mistakes will be made. egos - people prefer to say things like: 'no problem' 'piece of cake' 'I can whip that out in a few hours' 'it should be easy to update that old code' instead of: 'that adds a lot of complexity and we could end up making a lot of mistakes' 'we have no idea if we can do that; we'll wing it' 'I can't estimate how long it will take, until I take a close look at it' 'we can't figure out what that old spaghetti code did in the first place' If there are too many unrealistic 'no problem's', the result is bugs. poorly documented code - it's tough to maintain and modify code that is badly written or poorly documented; the result is bugs. In many organizations management provides no incentive for programmers to document their code or write clear, understandable code. In fact, it's usually the opposite: they get points mostly for quickly turning out code, and there's job security if nobody else can understand it ('if it was hard to write, it should be hard to read'). software development tools - visual tools, class libraries, compilers, scripting tools, etc. often introduce their own bugs or are poorly documented, resulting in added bugs. 1The list above was taken with permission from the Software QA and Testing Frequently Asked Questions.  
HYPERLINK "http://www.softwareqatest.com" http://www.softwareqatest.com 1996-2000 by Rick Hower 6.1 Safe Subsets of Languages A safe subset of a language is one that restricts certain features that are error-prone or are undefined or poorly defined. In some cases, a subset may be created by a particular vendor, or may grow out of the user community. In many cases, a standard subset does not exist, but coding standards are used to create the subset. Using coding standards means that the compiler will not enforce the subset, however. There are two primary reasons for restricting a language definition to a subset: 1) some features are defined in an ambiguous manner 2) some features are excessively complex or error-prone. A language is considered suitable for use in a safety-critical application if it has a precise definition (complete functionality as well), is logically coherent, and has a manageable size and complexity. The issue of excessive complexity makes it virtually impossible to verify certain language features. Overall, the issues of logical soundness and complexity will be the key toward understanding why a language is restricted to a subset for safety-critical applications. Compilers for safer language subsets are often certified to provide correct translation from the source code to object code. The subset usually undergoes vigorous study and verification before it is accepted by the user community. Besides formal language subsets, safety-specific coding standards are used to specify requirements for annotation of safety-critical code and prohibit use of certain language features which can reduce software safety. Avoid including programming style requirements in a coding standard. Put those in a separate coding style document. While you want programmers to use the same style, it is far more important that they follow the safety-related coding standards. A style war can lead to programmers ignoring the whole document, if style and standards are mixed. 
6.2 Insecurities Common to All Languages All programming languages have insecurities either in their definition or their implementation. Newer languages (or updates to existing language standards) try to correct the shortfalls of older generation languages, while adding additional functionality. In reality, they often add new insecurities as well. Some common problems are: Use of uninitialized variables. Uninitialized variables are the most common error in practically all programming languages. In particular, uninitialized or improperly initialized pointers (in languages that support them) often cause insidious errors. This mistake is very hard to catch because unit testing will not flag it unless explicitly designed to do so. The typical manifestation of this error is when a program that has been working successfully is run under different environmental conditions and the results are not as expected. Memory management concerns. Calls to deallocate memory should be examined to make sure that not only is the pointer released but that the memory used by the structure is released. Also, it is important to verify that only one deallocation call is made for a particular memory block. On the other side of the problem, memory that is not deallocated when no longer used will lead to a memory leak, and perhaps to an eventual system crash. Unspecified compiler behavior. The order operands are evaluated in is often not defined by the language standard, and is left up to the compiler vendor. Depending on the order of evaluation for certain side effects to be carried out is poor programming practice! The order of evaluation may be understood for this compiler and this version only. If the program is compiled with a different vendor, or a different version, the side effects may well change. Other unspecified behavior may include the order of initialization of global or static variables. 
6.3 Method of Assessment When comparing programming languages, we will not deal with differences among vendor implementations. Compiler implementations, by and large, do not differ significantly from the intent of the standard. However, standards are not unambiguous and they are interpreted by the vendor. Be aware that implementations will not adhere 100% to the standard because of the extremely large number of states a compiler can produce. We will present information on the strengths and weaknesses of popular programming languages, and discuss safety related concerns. Common errors specific to the language will be discussed as well. When evaluating a language, the following questions should be asked of the language as a minimum: Can it be shown that the program cannot jump to an arbitrary location? Are there language features that prevent an arbitrary memory location from being overwritten? Are the semantics of the language defined sufficiently for static code analysis to be feasible? Is there a rigorous model of both integer and floating point arithmetic within the standard? Are there procedures for checking that the operational program obeys the model of the arithmetic when running on the target processor? Are the means of typing strong enough* to prevent misuse of variables? Are there facilities in the language to guard against running out of memory at runtime? Does the language provide facilities for separate compilation of modules with type checking across module boundaries? Is the language well understood so designers and programmers can write safety-critical software? Is there a subset of the language which has the properties of a safe language as evidenced by the answers to the other questions? *Strong typing implies an explicit data type conversion is required when transforming one type to another 6.4 Languages There are over one hundred programming languages, with more being developed each year. 
Many are generated within academia as part of a research project. However, the subset of well established languages is more limited. The following languages will be examined in detail, focusing on the strengths and weaknesses each has with regards to producing safe software. Ada83, Ada95 and safe subsets Assembly Languages C C++ C# Forth FORTRAN Java LabVIEW Pascal Visual Basic 6.4.1 Ada83 and Ada95 Languages One of the most commonly used languages in military and safety critical applications is Ada. From the inception of Ada83 until 1997, Ada was mandated by the Department of Defense for all weapons-related and mission critical programs. Though currently not mandated, Ada is still commonly used within military projects. In addition, safety critical commercial software is being written in Ada. Ada is also the primary language of the International Space Station. The Ada language was designed with safety and reliability in mind. The goal of Ada is to maximize the amount of error detection as early in the development process as possible. The Ada standard was first released on 17th February 1983 as ANSI/MIL-STD-1815A Reference Manual for the Ada Programming Language. This original version is now called Ada83. The first major revision of the Ada standard was released on 21 December 1994 via ISO/IEC 8652:1995(E), and is commonly known as Ada95. Ada95 corrects many of the safety deficiencies of Ada83 and adds full object oriented capabilities to the language. The strengths of Ada95 lie in the following attributes: Object orientation Ada95 supports all the standard elements of object orientation: encapsulation of objects, inheritance, and polymorphism. Encapsulation hides information from program elements that do not need to know about it, and therefore decreases the chance of the information being altered unexpectedly. Inheritance and polymorphism contribute to the extensibility of the software. Software reuse is one plus of object orientation. 
A previously tested object can be extended, with new functionality, without breaking the original object. Strongly typed Ada enforces data typing. This means that you cannot use an integer when a floating point number is expected, unless you explicitly convert it. Nor can you access an integer array through a character pointer. Strong typing finds places where the programmer assumed one thing, but the source code actually lead to another implementation. Forcing conversions helps the programmer think about what she is doing, and why, rather than allowing the compiler to make implicit (and perhaps undefined) conversions. Range checking Range checking for arrays, strings, and other dimensioned elements is included in the language. This prevents accidentally overwriting memory outside of the array. The compiler will usually find the error. If not, a Run Time Exception (RTE) will be generated. Also included is checking for references to null. Support for multitasking and threads Tasking is built into the language. Support is included to deal with threads and concurrency issues. Protected objects provide a low overhead, data-oriented synchronization mechanism. Asynchronous transfer of control, with clean up of the interrupted process, is part of the language. Clarity of source code Ada code is closer to regular language than most languages, and this makes it easy to read. Often, coming back to code you wrote awhile ago is difficult. Much of the context has been forgotten, and it may be difficult to understand why you did something. When the code is easy to read, those problems are reduced. Also, when the code is reviewed or inspected, others find it easier to understand. Mixed language support Ada allows modules written in other languages to be used. Usually, just a wrapper must be created before the non-Ada routines can be accessed. This allows well-tested, legacy code to be used with newer Ada code. 
Real-time system support Ada95 has added support for real-time systems. Hard deadlines can be defined. Protected types give a low overhead type of semaphore. Dynamic task priorities is the mechanism to set the priority of a task at run-time, rather than compile-time, and is supported. Priority inversion, used to prevent deadlock when a high priority task needs a resource being used by a lower priority task, can be bounded. This allows Rate Monotonic Analysis to be used. Distributed systems support A unit in an Ada95 distributed system is called a partition. A partition is an aggregation of modules that executes in a distributed target environment. Typically, each partition corresponds to a single computer (execution site). Communication among partitions of a distributed system is based upon extending the remote procedure call paradigm. Exception handling Exceptions are raised by built-in and standard library functions, when events such as an integer overflow or out-of-bounds check occurs. Exceptions can also be raised by the program specifically, to indicate that the software reached an undesirable state. Exceptions are handled outside of the normal program flow, and are usually used to put the software into a known, safe state. The exception handler, written by the programmer, determines how the software deals with the exception. Support for non-object-oriented (traditional) software development styles Though Ada95 supports object-oriented programming, the language can be used with other styles as well. Functional (structural) programming techniques can be used with the language. Additional safety-related features are Compiler validation All Ada compilers must be validated. This means the compiler is put through a standard set of tests before it is declared a true Ada compiler. This does not mean that the compiler does not have defects, however. When choosing a compiler, ask for the history list of defects. 
Language restriction ability Ada95 added a restriction pragma. This allows features of the language to be turned off. You can specify a subset of the language, removing features that are not needed or that may be deemed unsafe. If a feature is not included, it does not have to be validated, thus reducing the testing and analysis effort. Validity checking of scalar values Ada95 has added a Valid attribute which allows the user to check whether the bit-pattern for a scalar object is valid with respect to the object's nominal subtype. It can be used to check the contents of a scalar object without formally reading its value. Using this attribute on an uninitialized object is not an error of any sort, and is guaranteed to either return True or False (and not raise an exception). The results are based on the actual contents of the object and not on what the optimizer might have assumed about the contents of the object based on some declaration. Valid can also be used to check data from an unchecked conversion, a value read from I/O, an object for which pragma Import has been specified, and an object that has been assigned a value where checks have been suppressed. Reviewable object code Ada provides mechanisms to aid in reviewing the object code produced by the compiler. Because the compiler will have defects, it is important in safety critical applications to review the object code itself. The pragma Reviewable can be applied to a partition (program) so that the compiler can provide the necessary information. The compiler vender should produce information on the ordering of modules in the object code, what registers are used for an object (and how long is the assignment valid), and what machine code instructions are used. In addition, a way to extract the object code for a particular module, so that other tools can use it, is suggested. 
Other information, such as Initialization Analysis (what is the initialization state of all variables), the relationship between Source and Object Code, and Exception Analysis (indicating where compiler-generated run-time checks occur in the object code and which exceptions can be raised by any statement), may also be provided by the compiler vendor.
Often, a small section of the software will be rewritten in assembly to increase execution speed or decrease code size. Also, the code used on bootup of a system (that loads the operating system) and BIOS-like utilities are often written in assembly. Interrupt service routines are another place you will find assembly language used. In addition, software that runs on small microcontrollers are often space-limited and therefore assembly coding is a good alternative to a high-level language. Why use assembly: Execution Speed Smaller code size Ability to do something that higher level languages do not allow. Tweaking the compilers optimization, by editing the assembly output it produces Problems and safety concerns: Can do anything with the processor and access any part of memory No notion of data type a sequence of bytes can be anything you want! You can jump anywhere in address space All higher level constructs (structures, arrays, etc.) exist only in the programmers implementation, and not in the language. Not portable between processors Compilers can usually produce assembly source code from the higher level language. This is useful for checking what the compiler does, and verifying its translation to that level. In fact, if the compiler produces correct assembly source code but incorrect object code, creating the assembly source and then using a different assembler to generate the object code could bypass the problem. More often, the assembly output is used to tweak performance in a slow routine. Use a profiling program to find out where the slow sections are first. The part of the program that the programmer thinks is likely to be slow is often not the actual problem. Running the program with a profiler will give hard numbers, and point to the truly sluggish sections of code. 6.4.3 C Language The C language is extremely popular because of its flexibility and support environment. 
C is often used in embedded and real-time environments, because hardware access is fairly easy and small, compact code can be generated. In many ways, C is a higher level assembly language. This gives it great flexibility, and opens a Pandoras box of possible errors. The support environment includes a wide variety of mature and inexpensive development and verification tools. Also, the pool of experienced vendors and personnel is quite large. However, Cs definition lacks the rigor necessary to qualify it as a suitable vehicle for safety-critical applications. There are dozens of dialects of C, raising integrity concerns about code developed on one platform and used on another. Despite its problems, many safety-critical applications have been coded in C and function without serious flaws. If C is chosen, however, the burden is on the developer to provide a thorough verification of code and data sequences, and sufficient testing to verify both functionality and error handling. One characteristic of C that decreases its reliability is that C is not a strongly typed language. That means that the language doesnt enforce the data typing, and it can be circumvented by representational viewing. (This means that by unintended use of certain language constructs, not by explicit conversion, a datum that represents an integer can be interpreted as a character.) The definition of strong typing implies an explicit conversion process when transforming one data type to another. C allows for implicit conversion of basic types and pointers. One of the features of strong typing is that sub-ranges can be specified for the data. With a judicious choice of data types, a result from an operation can be shown to be within a sub-range. In C it is difficult to show that any integer calculation cannot overflow. Unsigned integer arithmetic is modulo the word length without overflow detection and therefore insecure for safety purposes. 
Another feature of C that does not restrict operations is the way C operates with pointers. C does not place any restrictions on what addresses a programmer can point to and it allows arithmetic on pointers. While C's flexibility makes it attractive it also makes it a less reliable programming language. C has other limitations which are mentioned in reference [15]. Restricting the C language to certain constructs would not be feasible because the resulting language would not have the necessary functionality. However, rigorous enforcement of coding standards will decrease certain common errors and provide some assurance that the software will function as expected. Structured design techniques should be used. Limitations and Problems with the C language: Pointers Pointers in C allow the programmer to access anything in memory, if the operating system does not prevent it. This is good when writing a device driver or accessing memory-mapped I/O. However, a large number of C errors are due to pointer problems. Using a pointer to access an array, and then running past the end of the array, leads to smash the stack (see below). Pointer arithmetic can be tricky, and you can easily point outside of the data structure. Use of undefined pointers can trash memory or the stack, and lead the program to wander into undefined territory. Lack of Bounds Checking C does not provide any bounds checking on arrays and strings. It is left to the programmer to make sure that the array element is truly in bounds. Since the programmer is fallible, smash the stack and fandango on core often result. The problem is especially evident when passing an array to a function, which references it via a pointer. The function must know the length of the array, or it may run past the end. Calculations that determine the element to access must also be checked, as a negative or too large value can result, leading to out of bounds accesses. 
A wrapper function can be used when accessing an array or string which checks that the element is within bounds. This adds runtime overhead, but decreases the number of errors. Floating Point Arithmetic The ANSI C standard does not mandate any particular implementation for floating point arithmetic. As a result every C compiler implements it differently. The following test calculation can be executed: x = 1020+1 y = x-1020 The resulting value for y will differ greatly from compiler to compiler, none of them will be correct due to word length round-off. Casting from void* void* points to data of any type. It is left to the programmer to recast it when the pointer is used. There is no compile time nor run time checking to verify that the pointer is cast to a valid type (based on what the pointer actually points to). This method is inherently tricky and prone to errors. Commenting problems The C comment /* */ can lead to unexpected problems, by accidentally commenting out working code. Forgetting the end comment marker (*/) can cause the code that follows to be comment out, until another comment end marker is found. A good editor will often show this problem while the code is being developed, if it marks commented text in a different color. Also, compilers should be set up to generate warnings or errors to be generated if an open comment (/*) is found within a comment. Global variables Global variables can be considered as input parameters to any function, since a function has full access to them. So a function that takes 2 parameters, in a program with 100 global variables, actually has 102 parameters. This makes verifying the program very difficult. It is best to avoid global variables as much possible. Global variables also cause problems in a multi-threaded program, e.g. when different threads believe they have control of the variable, and both change the global. Common language errors Confusing = with == (assignment with logical equality) Confusing & vs. 
&& (Bitwise AND with logical AND) premature semicolon in control structures fall-through behavior in switch statements when "break" is omitted Comparing signed and unsigned variables. Particularly, testing unsigned < 0 or unsigned < negative signed value. Side effects and macros Side effects, such as incrementing a variable with ++, when mixed with macros (including functions that are actually implemented as a macro, such as putchar()), may produce unexpected results. smash the stack* In C programming, to corrupt the execution stack by writing past the end of a local array or other data structure. Code that smashes the stack can cause a return from the routine to jump to a random address, resulting in insidious data-dependent bugs. Variants include trash the stack, scribble the stack, mangle the stack. precedence lossage* /pre's*-dens los'*j/ A C coding error in an expression due to unintended grouping of arithmetic or logical operators. Used especially of certain common coding errors in C due to the nonintuitively low precedence levels of "&", "|", "^", "<<" and ">>". For example, the following C expression, intended to test the least significant bit of x, x & 1 == 0 is parsed as x & (1 == 0) which the compiler would probably evaluate at compile-time to (x & 0) and then to 0. Precedence lossage can always be avoided by suitable use of parentheses. For this reason, some C programmers deliberately ignore the language's precedence hierarchy and use parentheses defensively. fandango on core* (Unix/C, from the Mexican dance) In C, a wild pointer that runs out of bounds, causing a core dump, or corrupts the malloc arena in such a way as to cause mysterious failures later on, is sometimes said to have "done a fandango on core". On low-end personal machines without an MMU, this can corrupt the operating system itself, causing massive lossage. Other frenetic dances such as the rhumba, cha-cha, or watusi, may be substituted. 
overrun screw* A variety of fandango on core produced by a C program scribbling past the end of an array (C implementations typically have no checks for this error). This is relatively benign and easy to spot if the array is static; if it is auto, the result may be to smash the stack - often resulting in heisenbugs of the most diabolical subtlety. The term "overrun screw" is used especially of scribbles beyond the end of arrays allocated with malloc; this typically overwrites the allocation header for the next block in the arena, producing massive lossage within malloc and often a core dump on the next operation to use stdio or malloc itself. C Programmer's Disease* The tendency of the undisciplined C programmer to set arbitrary but supposedly generous static limits on table sizes (defined, if you're lucky, by constants in header files) rather than taking the trouble to do proper dynamic storage allocation. If an application user later needs to put 68 elements into a table of size 50, the afflicted programmer reasons that he or she can easily reset the table size to 68 (or even as much as 70, to allow for future expansion) and recompile. This gives the programmer the comfortable feeling of having made the effort to satisfy the user's (unreasonable) demands, and often affords the user multiple opportunities to explore the marvelous consequences of fandango on core. In severe cases of the disease, the programmer cannot comprehend why each fix of this kind seems only to further disgruntle the user. *These quotations were taken from Imperial College, London, UK, world wide web home page Dictionary of Computer Terminology ( HYPERLINK "http://wombat.doc.ic.ac.uk/" http://wombat.doc.ic.ac.uk/), compiled by Denis Howe. It contains graphic descriptions of common problems with C. The quotations were reproduced by permission of Denis Howe . 
Other references [24] discussed the important problem of dynamic memory management in C (Note that simply prohibiting dynamic memory management is not necessarily the best course, due to increased risk of exceeding memory limits without warning). Programming standards for C should include at least the following: Use parentheses for precedence of operation, and do not rely on the default precedence. The default may not be what you thought it was, and it will come back to bite you. Use parentheses within macros, around the variable name Dont use the preprocessor for defining complex macros Explicitly cast or convert variables. Do not rely on the implicit conversions. Avoid void* pointers when possible. Check arrays and strings for out of bounds accesses. Always use function prototypes. This allows the compiler to find problems with inconsistent types when passing variables to a function. Minimize the use of global variables. Each global can be considered a parameter to every function, increasing the chance of accidentally changing the global. Always include a default clause in a switchcase statement. Avoid recursion when possible. Make extensive use of error handling procedures and status and error logging. The Ten Commandments for C Programmers by Henry Spencer Thou shalt run lint frequently and study its pronouncements with care, for verily its perception and judgment oft exceed thine. Thou shalt not follow the NULL pointer, for chaos and madness await thee at its end. Thou shalt cast all function arguments to the expected type if they are not of that type already, even when thou art convinced that this is unnecessary, lest they take cruel vengeance upon thee when thou least expect it. If thy header files fail to declare the return types of thy library functions, thou shalt declare them thyself with the most meticulous care, lest grievous harm befall thy program. 
Thou shalt check the array bounds of all strings (indeed, all arrays), for surely where thou typest "foo" someone someday shall type "supercalifragilisticexpialidocious". If a function be advertised to return an error code in the event of difficulties, thou shalt check for that code, yea, even though the checks triple the size of thy code and produce aches in thy typing fingers, for if thou thinkest "it cannot happen to me", the gods shall surely punish thee for thy arrogance. Thou shalt study thy libraries and strive not to re-invent them without cause, that thy code may be short and readable and thy days pleasant and productive. Thou shalt make thy program's purpose and structure clear to thy fellow man by using the One True Brace Style, even if thou likest it not, for thy creativity is better used in solving problems than in creating beautiful new impediments to understanding. Thy external identifiers shall be unique in the first six characters, though this harsh discipline be irksome and the years of its necessity stretch before thee seemingly without end, lest thou tear thy hair out and go mad on that fateful day when thou desirest to make thy program run on an old system. Thou shalt foreswear, renounce, and abjure the vile heresy which claimeth that "All the world's a VAX", and have no commerce with the benighted heathens who cling to this barbarous belief, that the days of thy program may be long even though the days of thy current machine be short. A checklist of Generic and C-specific programming standards is included in Appendix B. Additional guidelines on C programming practices are described in the book Safer C: Developing Software for High-integrity and Safety-critical Systems (Reference [25], and also in [27] and [28]). Included in the book are lists of undefined or implementation defined behaviors in the language. 
6.4.4 C++ Language The C++ programming language was created by Bjarne Stroustrup as an extension (superset) of the C programming language discussed above (Section 6.4.3 C Language). The goal was to add object-oriented features, while maintaining the efficiency of C. The language was standardized in November, 1997 as ISO/IEC 14882. C++ adds Object Orientation (OO) as well as fixing or updating many C features. C++ is also more strongly typed than C. However, C++ suffers from many of the same drawbacks as C. A standard "safe subset" of C++ does not presently exist. Strengths of the C++ Language Object Orientation Object orientation allows data abstraction (classes), encapsulation of data and the functions that use the data, and reusable/extensible code. Stronger type checking than C C++ type checking can be subverted, but it is much better than Cs. Most of the mechanisms that reduce the type checking were left in to support compatibility with the C language. Const to enforce the invariability of variables and functions Declaring a function const means that the function will not change any passed parameters, even if they are passed by reference. A const variable cannot be changed, and replaces the #define preprocessor directives. The programmer can get around const with a cast. Generic programming (templates). C++ has the ability to use generic containers (such as vectors) without runtime overhead. C++ supports both Object-Oriented and Structural design and programming styles. The user-defined types (classes) have efficiencies that approach those of built-in types. C++ treats of built-in and user-defined types uniformly Exceptions and error handling Exceptions allow errors to be caught and handled, without crashing the program. They may not be the best way to handle errors, and the software does have to be explicitly designed to generate and deal with exceptions. However, exceptions are an improvement over Cs setjmp() and longjmp() means of exception handling. 
Namespaces Namespaces are most useful for libraries of functions. They prevent function names from conflicting, if they are in different libraries (namespaces). While not primarily a safety feature, namespaces can be used to clearly identify to the reader and the programmers what functions are safety related. References to variables A reference is like a pointer (it points to the variable), but it also simplifies the code and forces the compiler to create the pointer, not the programmer. Anything the compiler does is more likely to be error free than what the programmer would do. Inline Functions Inline functions replace #define macros. They are easier to understand, and less likely to hide defects. Good practices to reduce C++ errors: Never use multiple inheritance, only use one to one (single) inheritance. This is because interpretations of how to implement multiple inheritance are inconsistent (Willis and Paddon, [29] 1995. Szyperski supports this view.); Minimize the levels of inheritance, to reduce complexity in the program. Only rely on fully abstract classes, passing interface but not implementation (suggestion by Szyperski at 1995 Safety through Quality Conference - NASA-KSC [30]). Minimize the use of pointers. Do not allow aliases. No side-effects in expressions or function calls. Make the class destructor virtual if the class can be inherited. Always define a default constructor, rather than relying on the compiler to do it for you. Define a copy constructor. Even if a bitwise copy would be acceptable (the default, if the compiler generates it), that may change in the future. If any memory is allocated by the class methods, then a copy constructor is vital. If the class objects should not be copied, make the copy constructor and assignment operator private, and do not define bodies for them. Define an assignment operator for the class, or add a comment indicating that the compiler-generated one will be used. 
Use operator overloading sparingly and in a uniform manner. This creates more readable code, which increases the chance of errors being found in inspections, and reduces errors when the code is revisited. Use const when possible, especially when a function will not change anything external to the function. If the compiler enforces this, errors will be found at compile time. If not, it will aid in finding errors during formal inspections of the code. Dont use the RTTI (Run-Time Type Information). It was added to support object oriented data bases. If you think its necessary in your program, look again at your design. Avoid global variables. Declare them in a structure as static data members. Make sure that the destructor removes all memory allocated by the constructor and any member functions, to avoid memory leaks. Use templates with care, including the Standard Template Library. The STL is not thread-safe. Take special care when using delete for an array. Check that delete[] is used. Also check for deleting (freeing) a pointer that has been changed since it was allocated. For example, the following code will cause problems: p = new int[10]; // allocate an array of 10 integers p++; // change the pointer to point at the second integer delete p; // error, not array delete (delete[]) and pointer changed A review of potential problems in C++ was published by Perara (Reference [26]). 
The headings from that paper are as follows: Dont rely on the order of initialization of globals Avoid variable-length argument lists Dont return non-constant references to private data Remember The Big Three Make destructors virtual Remember to de-allocate arrays correctly Avoid type-switching Be careful with constructor initialization lists Stick to consistent overloading semantics Be aware of the lifetimes of temporaries Look out for implicit construction Avoid old-style casts Dont throw caution to the wind exiting a process Dont violate the Substitution Principle Remember there are exceptions to every rule. A detailed discussion is provided on each point, in that reference. 6.4.5 C# Language The C# language is cutting edge. It has been created by Microsoft and is expected to be released in the second half of 2001. C# is loosely based on C/C++, and bears a striking similarity to Java in many ways. Microsoft describes C# as follows: "C# is a simple, modern, object oriented, and type-safe programming language derived from C and C++. C# (pronounced 'C sharp') is firmly planted in the C and C++ family tree of languages, and will immediately be familiar to C and C++ programmers. C# aims to combine the high productivity of Visual Basic and the raw power of C++." C# has been created as part of Microsofts .NET environment, and is primarily designed for it. Any execution environment will have to support aspects that are specific to the Microsoft platform. Since they must support the Win32 API, C# may be restricted to Win32 machines. However, at least one company is considering porting C# to Linux. C# has the following features: Exceptions References can be null (not referencing real object). C# throws an exception if the reference is accessed. Garbage collection. You CANT delete memory, once it is allocated! 
Array bounds checking (throws an exception) Like Java, machine-independent code which runs in a managed execution environment (like the JVM) No pointers, except in routines marked unsafe Multi-dimensioned arrays Switch statements do not allow fall through to next case. Thread support, including locking of shared resources No global variables All dynamically allocated variables initialized before use. The compiler produces warnings if using uninitialized local variable. Overflow checking of arithmetic operations (which can be turned off if needed) foreach loop simpler way to do a for loop on an array or string. This decreases the chance of going out of bounds, because the compiler determines how often to loop, not the programmer. Everything is derived from the base class (system class). This means that integers, for example, have access to all the methods of the base class. The following code in C# would convert an integer to a string and write it on the console: int i = 5; System.Console.WriteLine (i.ToString()); Has a goto statement but it may only point anywhere within its scope, which restricts it to the same function or finally block, if it is declared within one. It may not jump into a loop statement which it is not within, and it cannot leave a try block before the enclosing finally block(s) are executed. Pointer arithmetic can be performed in C# within methods marked with the unsafe modifier. Internet oriented (like Java) The following features are of concern in embedded or safety environments: Garbage collection can lead to non-deterministic timing. It is a problem in real-time systems. Portability: C# is currently only designed to work on Microsoft Windows systems. Speed: C# is an interpreted language, like Java. Unlike Java, there is currently no compiler that will produce native code. 6.4.6 Forth Language The Forth language was developed in the 1960s by Charles Moore. 
He wanted a language that would make his work of controlling telescopes both easier and more productive. Forth is stack based the language is based on numbers and words. Numbers are pushed on the stack. Words are executed on the stack numbers, and are essentially functions. Words are kept in a Forth dictionary. The programmer can create new words (new functions), usually using existing words. Forth uses reverse polish notation. The last number pushed on the stack is the first off it. A simple Forth statement to add 3 and 4 is: 3 4 + (pushes 3, then pushes 4, + pops the top two numbers off the stack and adds them, then pushes the result onto the stack.). In this case, + is a built-in word (function). Forth has the benefits of higher level language (like C), but it is also very efficient (memory and speed-wise). It is used mainly in embedded systems. Forth can be used as the Operating System (OS) as well, and it often is in small embedded microcontrollers. Forth has no safety features. The programmer can do just about anything! It is very similar to C and assembly language this way. The flexibility and speed it gives must be balanced with the need to rigorously enforce coding standards, and to inspect the safety-critical code. The Forth programmer must know where each parameter/variable is on the stack, and what type it is. This can lead to errors, if the type or location is incorrectly understood. One positive aspect of Forth, from a safety standpoint, is that it is very easy to unit test. Each word is a unit and can be thoroughly tested, prior to integration into larger words. There is work on applying formal methods to Forth The following quote is from Philip J. Koopman Jr.2 The italics are added to emphasize particular aspects of concern to programming a safe system. Good Forth programmers strive to write programs containing very short (often one-line), well-named word definitions and reused factored code segments. 
The ability to pick just the right name for a word is a prized talent. Factoring is so important that it is common for a Forth program to have more subroutine calls than stack operations. Factoring also simplifies speed optimization via replacing commonly used factors with assembly language definitions. Forth programmers traditionally value complete understanding and control over the machine and their programming environment. Therefore, what Forth compilers don't do reveals something about the language and its use. Type checking, macro preprocessing, common subexpression elimination, and other traditional compiler services are feasible, but usually not included in Forth compilers. . Forth supports extremely flexible and productive application development while making ultimate control of both the language and hardware easily attainable. 2Philip J. Koopman Jr. by permission of the Association for Computing Machinery; A Brief Introduction to Forth; This description is copyright 1993 by ACM, and was developed for the Second History of Programming Languages Conference (HOPL-II), Boston MA. koopman@cmu.edu 6.4.7 FORTRAN Language FORTRAN was developed in the 1950s by IBM, and first standardized in the 1960s, as FORTRAN 66. It is primarily a numerical processing language, great for number crunching. FORTRAN has gone through several standards since the 1960s. The versions of the language considered here are FORTRAN 77 and Fortran 90. While not usually used in embedded systems, FORTRAN can still be used in a safety critical system (or a part of the system), if the numerical results are used in safety decisions. FORTRAN 77 is a structured, procedural language. It contains all the elements of a high level language (looping, conditionals, arrays, subroutines and functions, globals, independent compilation of modules, and input/output (formatted, unformatted, and file)). In addition, it had complex numbers, which are not part of the other languages considered here. 
There is no dynamic memory (allocate/deallocate) in FORTRAN 77. Elements of FORTRAN 77 related to safety Weak data typing. The data type of a variable can be assumed, depending on the first letter of the name, if it is not explicitly defined. GOTO statements. The programmer can jump anywhere in the program. Fixed-form source input. This relates to safety only in that it can look OK (in an inspection) and be wrong (incorrect column). However, the compiler should prevent this problem from occurring. Limited size variable names. The length of the variable name was limited to 8 characters. This prevented using realistic names that described the variable. Programmers often used cryptic names that made understanding and maintaining the program difficult. Lack of dynamic memory. This prevents the problems related to dynamic memory, though it limits the language for certain applications. The EQUIVALENCE statement should be avoided, except with the project manager's permission. This statement is responsible for many questionable practices in Fortran giving both reliability and readability problems.* Use of the ENTRY statement. This statement is responsible for unpredictable behavior in a number of compilers. For example, the relationship between dummy arguments specified in the SUBROUTINE or FUNCTION statement and in the ENTRY statements leads to a number of dangerous practices which often defeat even symbolic debuggers.* Use of COMMON blocks. COMMON is a dangerous statement. It is contrary to modern information hiding techniques and if used freely, can rapidly destroy the maintainability of a package. Array bounds checking is not done dynamically (at run time), though compilers may have a switch that allows it at compile time. *These elements were extracted from Appendix A of Hatton, L. (1992) "Fortran, C or C++ for geophysical software development", Journal of Seismic Exploration, 1, p77-92. 
Fortran 90 is an updated version of FORTRAN 77 that provides rudimentary support for Object Oriented programming, and other features. Fortran 90 includes: Dynamic memory allocation, specifically allocatable pointers and arrays. Rudimentary support for OOP. Inheritance is not supported. Constructors simply initialize the data members. There are no destructors. It does have derived types and operator overloading. Rudimentary pointers. A FORTRAN pointer is more of an alias (reference) than a C-style pointer. It cannot point to arbitrary locations in memory, or be used with an incorrect data type. Variables that will be pointed to must declare themselves as TARGETs. Free-style format and longer variable names (31 characters). These increase readability of the code. Improved array operations, including operating on a subsection of the array and array notation (e.g. X(1:N)). Statements like A=0 and C=A+B are now valid when A and B are arrays. Also, arrays are actually array objects which contain not only the data itself, but information about their size. There is also a built-in function for matrix multiplication (matmul). Better function declarations (prototyping). Modern control structures (SELECT CASE, EXIT, ...) User defined data types (modules). Like struct in C, or record in Pascal. Recursive functions are now a part of the language. Problems with Fortran 90: Order of evaluation in if statements (if (a and b)) is undefined. A compiler can evaluate b first, or a first. However, if (present(a) .and. a) could cause a problem, if the compiler evaluates a (right side) first, and a doesn't exist. Do not rely on order of evaluation in if statements. Allocatable arrays open the door to memory leakage (not deallocating when done) and accessing the array after it has been deallocated. Implicit variables are still part of the language. Some compilers support the extension of declaring IMPLICIT NONE, which forces the data type to be declared. 
6.4.8 Java Language Java was created by Sun Microsystems in 1995, with the first development kit (JDK 1.0) released in January, 1996. Since then, Java has become a widespread language, particularly in internet applications. Java is used in embedded web systems, as the front end/GUI for other embedded systems, and for data distribution/networking systems, among many other applications. Java was created to be platform independent. Java programs are not normally compiled down to the machine code level. They compile to byte code, which can then be run on Java Virtual Machines (JVM). The JVMs contain the machine-specific coding. When a Java program is run, the JVM interprets the byte code. This interpreted mode is usually slower than traditional program execution. In addition, timing will not be deterministic. Work is in process to create Java specifications for real-time, embedded systems. In December, 1998, the Java Real-Time Expert Group was formed to create a specification for extensions to Java platforms that add capabilities to support real-time programming in Java and to support the adoption of new methodologies as they enter into practice. The group has focused on new APIs, language, and virtual machine semantics in six key areas (the Java thread model, synchronization, memory management, responding to external stimuli, providing accurate timing, and asynchronous transfer of control). JSR-000001, Real-time Specification for Java, was released in June, 2000. Compilers for Java programs do exist. They compile the program down to the machine level. This decreases the portability and removes the platform independence, but allows an increase in execution speed and a decrease in program size. Compiled Java programs do not need a Java Virtual Machine (JVM). Java has the following features: Fully Object Oriented. This has the plusses of reusability and encapsulation Dynamic loading of new classes, and object/thread creation at runtime. No pointers allowed! 
No pointer arithmetic and other pointer problems common in C. However, objects can be accessed through references. Garbage collection to free unused memory. The programmer doesn't have to remember to delete the memory. Support for threads, including synchronization primitives. Support for distributed systems. No goto statement, though labeled break/continue statements are allowed. Allows implicit promotion (int to float, etc.), but conversion to lower type needs explicit cast Variables initialized to known values (including references) Allows recursion Checks array bounds and references to null Java's document comments (/** ... */) and standard documentation conventions aid in readability. Type safe (compile variable and run-time types must match) No operator overloading Built-in GUI, with support for events Built-in security features (language limits uncontrolled system access, bytecode verification is implemented at run-time, distinguishes between trusted and untrusted (foreign) classes, and restricts changing of resources. Packages downloaded code can be distinguished from local) However, still not secure. Ways to circumvent are found, and bug fixes are released Java automatically generates specifications (prototypes) (as opposed to using redundant specifications). Java has these limitations: can't interface to hardware; must use native methods of another language to do so. Uses a Java Virtual Machine, which must be tested or certified, unless compiled to native code. Garbage collection to free unused memory can't be turned off! This affects determinism in real-time systems. Selfish threads (those that do not call sleep()), on some OSs, can hog the entire application. Threads can interfere with each other if using the same object. Synchronization makes the thread not be interrupted until done, but deadlock can still occur. Doesn't detect out of range values (such as integer multiplication leading to an integer value that is too large). 
When passing arguments to functions, all objects, including arrays, are call-by-reference. This means that the function can change them! Java is an interpreted language, which is often slower than a compiled language. Compilers are available, however, which will get around this problem. Non-deterministic timing. Real-time extensions are being worked on, but they are not standardized yet. The Java language has not been standardized by a major standards group. It is in the control of Sun Microsystems. 6.4.9 LabVIEW LabVIEW is a graphical programming language produced by National Instruments. It is used to control instrumentation, usually in a laboratory setting. LabVIEW allows the user to display values from hardware (temperatures, voltages, etc.), to control the hardware, and to do some processing on the data. It is primarily used in ground stations that support hardware (such as space flight instruments). LabVIEW may be part of safety critical software development if the ground station it supports is safety critical. In addition, it may be used to support infrastructures (e.g. wind tunnel) that have safety critical aspects. In LabVIEW, the method by which code is constructed and saved is unique. There is no text based code as such, but a diagrammatic view of how the data flows through the program. LabVIEW is a tool of the scientist and engineer (who are not always proficient programmers) who can often visualize data flow, but are unsure of how to convert that into a conventional programming language. Also, LabVIEW's graphical structure allows programs to be built quickly. Data flow is the fundamental tenet by which LabVIEW code is written. The basic philosophy is that the passage of data through nodes within the program determines the order of execution of the functions of the program. LabVIEW VI's (Virtual Instruments) have inputs, process data and produce outputs. 
By chaining together VI's that have common inputs and outputs it is possible to arrange the functions in the order by which the programmer wants the data to be manipulated. LabVIEW source code and development is supported by Windows 9x/2000/NT, Macintosh, PowerMax OS, Solaris, HP-Unix, Sun, Linux, and Pharlap RTOS (Real-Time Operating System). Executables can be compiled under their respective development systems to run on these platforms (native code). Code developed under one platform can be ported to any of the others, recompiled and run. LabVIEW has rich data structures (For and While loops, Shift registers, Sequencing, and Arrays and clusters). It supports polymorphism and compound arithmetic. Display types include Indicators, Graphs and charts, and Instrument simulation. Strings and file handling are included in LabVIEW. Many debugging techniques, such as breakpoints, single stepping, and probes, are supported. A real-time version of LabVIEW (LabVIEW-RT) exists for embedded processors. Because you can't pop the hood of LabVIEW and review the source code, formal inspections cannot be done on it. Thorough analysis and testing are highly recommended if LabVIEW is used in safety critical systems. 6.4.10 Pascal Language The Pascal language was originally designed in 1971 by Niklaus Wirth, professor at the Polytechnic of Zurich, Switzerland. Pascal was designed as a simplified version for educational purposes of the language Algol, which dates from 1960. The Pascal language has been used as a tool to teach structured programming. While there is still a strong subset of Pascal advocates, the language is not commonly used anymore. The original Pascal standard is ISO 7185 : 1990. The Extended Pascal standard was completed in 1989 and is a superset of ISO 7185. The Extended Pascal standard is ANSI/IEEE 770X3.160-1989 and ISO/IEC 10206 : 1991. Object Oriented Pascal was released as a Technical Report by ANSI in 1993. 
Object Pascal is the language used with the Delphi Rapid Applications Development (RAD) system. SPADE Pascal* is a subset that has undergone the study and verification necessary for safety-critical applications. The major issue with Pascal is that no provision for exception handling is provided. However, if a user employs a good static code analysis tool, the question of overflow in integer arithmetic can be addressed and fixed without needing exception handlers. The SPADE Pascal subset is suited to safety-critical applications. *SPADE PASCAL is a commercially available product and is used here only as an example to illustrate a technical point. 6.4.11 Visual Basic Visual Basic is a Microsoft version of the Basic language for use with Windows operating systems. It is oriented toward GUIs (Graphical User Interfaces), and is proprietary. However, because Visual Basic is easy to use, many programs that will run under Windows use it as the user interface, and some other language for the meat of the program. Visual Basic is a Rapid Application Development (RAD) tool, like Delphi (which uses Pascal). Features of Visual Basic: Strongly typed, if type checking is turned on; weakly typed if it is not! Variable types do not have to be declared. The older style of a type suffix on the end of the name (e.g. str$ for a string variable) is still allowed. Has a variant data type that can contain data in various formats (numerical, string, etc.). Use of this data type subverts the attempt to enforce strong data typing. Component based and not true Object Oriented. A component is a binary package with a polymorphic interface. Other components in the system depend upon nothing but the interface. The underlying implementation can be completely changed, without affecting any other component in the system, and without forcing a re-link of the system. Inheritance is not supported in VB. Interpreted environment. 
The Visual Basic environment checks the syntax of each line of code as you type it in, and highlights these errors as soon as you hit the enter key. Compilers are now available for VB, which speeds up program execution speed. Trapping. Visual Basic lets the programmer catch runtime errors. It is possible to recover from these errors and continue program execution. The code is hidden from the programmer. This is a strength of Visual Basic, as it makes programming much easier (graphical, drag-and-drop). However, the code is very difficult to inspect, unless the inspectors are intimately knowledgeable about Microsoft Windows and Visual Basic. In many ways, Visual Basic is an automatic code generating program. 6.5 Miscellaneous Problems Present in Most Languages The following quotations were taken from the Imperial College, London, UK, world wide web home page Dictionary of Computer Terminology, compiled by Denis Howe. It contains graphic descriptions of common problems. aliasing bug (Or "stale pointer bug") A class of subtle programming errors that can arise in code that does dynamic allocation, especially via malloc or equivalent. If several pointers address (are "aliases for") a given hunk of storage, it may happen that the storage is freed or reallocated (and thus moved) through one alias and then referenced through another, which may lead to subtle (and possibly intermittent) lossage depending on the state and the allocation history of the malloc arena. This bug can be avoided by never creating aliases for allocated memory. Use of a higher-level language, such as Lisp, which employs a garbage collector is an option. However, garbage collection is not generally recommended for real-time systems. Though this term is nowadays associated with C programming, it was already in use in a very similar sense in the ALGOL 60 and FORTRAN communities in the 1960s. spam To crash a program by overrunning a fixed-size buffer with excessively large input data. 
heisenbug /hi:'zen-buhg/ (From Heisenberg's Uncertainty Principle in quantum physics) A bug that disappears or alters its behaviour when one attempts to probe or isolate it. (This usage is not even particularly fanciful; the use of a debugger sometimes alters a program's operating environment significantly enough that buggy code, such as that which relies on the values of uninitialized memory, behaves quite differently.) In C, nine out of ten heisenbugs result from uninitialized auto variables, fandango on core phenomena (especially lossage related to corruption of the malloc arena) or errors that smash the stack. Bohr bug /bohr buhg/ (From Quantum physics) A repeatable bug; one that manifests reliably under a possibly unknown but well-defined set of conditions. mandelbug /man'del-buhg/ (From the Mandelbrot set) A bug whose underlying causes are so complex and obscure as to make its behaviour appear chaotic or even nondeterministic. This term implies that the speaker thinks it is a Bohr bug, rather than a heisenbug. schroedinbug /shroh'din-buhg/ (MIT, from the Schroedinger's Cat thought-experiment in quantum physics). A design or implementation bug in a program that doesn't manifest until someone reading source or using the program in an unusual way notices that it never should have worked, at which point the program promptly stops working for everybody until fixed. Though (like bit rot) this sounds impossible, it happens; some programs have harboured latent schroedinbugs for years. bit rot (Or bit decay). Hypothetical disease the existence of which has been deduced from the observation that unused programs or features will often stop working after sufficient time has passed, even if "nothing has changed". The theory explains that bits decay as if they were radioactive. As time passes, the contents of a file or the code in a program will become increasingly garbled. 
There actually are physical processes that produce such effects (alpha particles generated by trace radionuclides in ceramic chip packages, for example, can change the contents of a computer memory unpredictably, and various kinds of subtle media failures can corrupt files in mass storage), but they are quite rare (and computers are built with error-detecting circuitry to compensate for them). The notion long favoured among hackers that cosmic rays are among the causes of such events turns out to be a myth; see the cosmic rays entry for details. Bit rot is the notional cause of software rot. software rot Term used to describe the tendency of software that has not been used in a while to lose; such failure may be semi-humourously ascribed to bit rot. More commonly, "software rot" strikes when a program's assumptions become out of date. If the design was insufficiently robust, this may cause it to fail in mysterious ways. For example, owing to endemic shortsightedness in the design of COBOL programs, most will succumb to software rot when their 2-digit year counters wrap around at the beginning of the year 2000. Actually, related lossages often afflict centenarians who have to deal with computer software designed by unimaginative clods. One such incident became the focus of a minor public flap in 1990, when a gentleman born in 1889 applied for a driver's licence renewal in Raleigh, North Carolina. The new system refused to issue the card, probably because with 2-digit years the ages 101 and 1 cannot be distinguished. Historical note: Software rot in an even funnier sense than the mythical one was a real problem on early research computers (eg. the R1). If a program that depended on a peculiar instruction hadn't been run in quite a while, the user might discover that the opcodes no longer did the same things they once did. ("Hey, so-and-so needs an instruction to do such-and-such. We can snarf this opcode, right? 
No one uses it.") Another classic example of this sprang from the time an MIT hacker found a simple way to double the speed of the unconditional jump instruction on a PDP-6, so he patched the hardware. Unfortunately, this broke some fragile timing software in a music-playing program, throwing its output out of tune. This was fixed by adding a defensive initialization routine to compare the speed of a timing loop with the real-time clock; in other words, it figured out how fast the PDP-6 was that day, and corrected appropriately. memory leak An error in a program's dynamic store allocation logic that causes it to fail to reclaim discarded memory, leading to eventual collapse due to memory exhaustion. Also (especially at CMU) called core leak. These problems were severe on older machines with small, fixed-size address spaces, and special "leak detection" tools were commonly written to root them out. With the advent of virtual memory, it is unfortunately easier to be sloppy about wasting a bit of memory (although when you run out of virtual memory, it means you've got a *real* leak!). memory smash (XEROX PARC) Writing to the location addressed by a dangling pointer. 6.6 Programming Languages: Conclusions The safest languages are Ada95 (and Ada83) and the SPADE Pascal subset. Ada was specifically created with safety in mind. However, Ada is not the most popular language, and finding and keeping good Ada programmers can be difficult. For this reason, other languages are often chosen. If choosing any of the other languages, especially C, assembly language, or Forth, be aware of the limitations. Create and enforce a coding standard. Devote extra time to inspections, analysis and test. Educate the developers on the best programming practices for that language, and on the pitfalls of the language chosen. Take a proactive approach to reducing errors up front, then test the stuffing out of the software! 
6.7 Compilers, Editors, Debuggers, IDEs and other Tools The minimal set of tools (programs) that a software developer needs is: Editor to create the software (source code) with. Compiler (or cross-compiler) to create object code with, from the source code. Linker to create an executable application from the object code. Debugger to find the location of defects in the software. Often these tools come bundled in an Integrated Development Environment (IDE), where the developer can shift from editing to compiling/linking to debugging, and back to editing, without leaving the programming environment. Many IDEs have additional tools, or have the ability to add in tools from other vendors. How well the tools can be integrated is something the developer should look at when choosing an IDE. In an embedded environment, the IDE can include simulators for the target hardware, the ability to download the software generated into the target hardware, and sophisticated debugging and monitoring capabilities. Some IDEs are designed for safety critical software development. For example, DDC-I, a company that specialized in safety-critical software development and tools, has an IDE called SCORE (Safety-Critical Object-oriented Real-time Embedded). The SCORE development environment has been designed to address the needs of safety-critical, real-time, embedded systems, according to their website.  HYPERLINK "http://www.ddci.com/products/SCORoot.htm" http://www.ddci.com/products/SCORoot.htm Humans make mistakes, and programmers are only human (despite what some may claim). The goal of a tool is to find as many of the errors as quickly as possible. Some tools help enforce good programming practice. Others make life difficult for the programmer and lead to additional errors, because the programmer is annoyed or is actively subverting the intent of the tool! In general, look for tools that are: Easy to learn. Well integrated (if in an IDE) or easy to integrate with other tools. 
Well integrated means that it is easy to switch between the different tools. Default to enforcing standards, rather than relying on the programmer to set the right switches Well documented. This includes not only documentation on how to use the tool, but limitations and problems with using the tool. Knowing what the tool cannot do is as important as what it can do. A good article on choosing tools for embedded software development is Choosing The Right Embedded Software Development Tools[6] Editors can be a simple text editor (such as Windows NotePad), a text editor designed for programmers (that handles indenting, etc.) or a sophisticated, graphical-interfaced editor. Whatever kind is chosen, look for these features: Can the style (such as indentation) be set to match that chosen for the project? Does the editor show language keywords and constructs (including comments) in a different way (e.g. various colors), to help the programmer catch errors such as mistyping a keyword or forgetting to close out a multi-line comment? What kinds of errors can the editor flag? Can the editor support multiple files and search/replace among all of them? Can a variable be tracked across multiple files? Compilers and linkers usually come together as one piece of software. Cross-compilers run on one system (usually a desktop computer) and produce object code for another processor. When choosing a compiler, consider the following: Can warnings (possible problems) be treated as errors? Can this compiler switch be set as the default mode? Is there a list of defects (bugs) in the compiler? Is there a list of historical defects, and the versions/patches they were corrected in? Can the compiler produce assembly language output? What assembler program is the output targeted to? Does the compiler support integration with a debugging tool? Does it include the option of symbolic information, that allows the debugger to reference high-level language source code? 
Does the compiler (or cross-compiler) support the particular processor being used? Does it optimize for that processor? For example, if the software will run on a Pentium II, the compiler should optimize the code for that processor, and not treat it like an 80386. Does the compiler offer optimization options that allow you to choose size over speed (or vice versa), or no optimization at all? If used in an embedded system, does the compiler support packed bitmaps (mapping high-level structures to the hardware memory map), in-line assembly code, and writing interrupt handlers in the high-level language? How optimized is the run-time library that comes with the compiler? Can you use stubs to eliminate code you don't need? Is the source code available, so that unneeded code can be stripped out, or for formal verification or inspection? Debuggers are a vital tool for finding errors, once they've reared their ugly little heads. Software debuggers run the defective software within the debugging environment. This can lead to some problems, if the problem is memory violations (out of bounds, etc.), since the debug environment and the normal runtime environment differ. Hardware debuggers (e.g. In-Circuit Emulators) run the code on a simulated processor, with the ability to stop and trace at any instruction. Debuggers operate by stopping program execution at breakpoints. Breakpoints can be a particular instruction, a variable going to a specific value, or a combination of factors. Once the program is stopped, the environment can be interrogated. You can look at the values of variables, the stack, values in specific memory locations, for example. From the breakpoint, you can single-step through the program, watching what happens in the system, or run until the next breakpoint is triggered. Debuggers usually need some symbolic information to be included in the object/executable code for them to be able to reference the source code. 
When debugging, you usually want to be able to see the source code, and not the assembly equivalent. The debugger and the compiler/linker must know how to talk to each other for this to happen. When evaluating debuggers, consider the following: How well do the debugger and the compiler get along? Will they talk to each other? How much of the system will the debugger let you get at? Can you see memory locations, variable values, and the stack trace? Can you change what's in a memory location or variable? Does the debugger allow you to trace back through procedure calls? Can you trigger on multiple events simultaneously, such as a variable being set to a value while another variable is at a defined value? Can you stop at a memory location only if the value of a variable matches a preset condition? Does the debugger support debugging at the high level language, mixed high level/assembly language, and at the assembly language level? Can the debugger display the high-level data structures used? In addition to the basics, these tools are very useful in creating good (and safe) code: Lint finds problems in the code that compilers might miss. Not everything is a true problem, but should be evaluated. If it's non-standard, treat it as an error! Profiler checks speed of program. Good for finding routines that take the most time. Points to areas where optimization may be useful. Memory check programs find memory leaks, writing outside of bounds. Locator needed for embedded environments, when you must separate what parts go in ROM (program) and what go in RAM (variables, stack, etc.) 6.8 CASE tools and Automatic Code Generation 6.8.1 Computer-Aided Software Engineering (CASE) Computer-aided software engineering (CASE) is a collection of automated tools that support the process of software engineering. 
CASE can include: Structured Analysis (SA) Structured Design (SD) Code Generators Documentation Generators Defect Tracking Requirements Tracing Structured Discourse and Collaboration tools Integrated Project Support Environments (IPSEs) Inter-tool message systems Reverse Engineering Metric Generators/Analyzers. Tools such as editors, compilers, debuggers, Integrated Development Environments may technically be CASE tools, but are usually considered separately. Project management tools (scheduling and tracking) and Configuration Management (Release Management, Change Management (CM)) may also be considered CASE tools. When CASE was first promoted in the 1980s, the quality of the tools provided was not very good. CASE tools did not cover enough of the software development cycle, did not integrate well with other tools, and were very expensive for what you actually got out of them. While CASE tools are still rather expensive, their quality, reliability, and interoperability have greatly improved. There are even efforts to produce free CASE tool suites. CASE tools are now classified in three types that describe their functionality and usage. Upper CASE is used during the early phases of software development when initial design, data and process requirements, and the interface design are determined. Requirements analysis and traceability tools, and design tools are included in this classification. Lower CASE tools are primarily those that generate code from the design (output of the Upper CASE tools). These tools can also modify an existing application into a new application with minimal new programming. The third category is integrated CASE (I-CASE), which joins the Upper and Lower CASE tools and helps in the complete software development process. CASE tools include: Analyzers for software plans, requirements and designs Methodology support (design, state charts, etc.) Model Analysis (consistency checking, behavior analysis, etc.) 
Source code static analyzers (auditors, complexity measurers, cross-referencing tools, size measurers, structure checkers, syntax and semantics analyzers) Requirements Tracing Design tools (UML modeling, etc.) Configuration Management System/Prototype simulators Requirements-based Test Case Generators Test Planning tools Test Preparation Tools (data extractors, test data generators) Test Execution Tools (dynamic analyzers-assertion analyzers, capture-replay tools, coverage/frequency analyzers, debuggers, emulators, network analyzers, performance/timing analyzers, run-time error checkers, simulators, test execution managers, validation suites) Test evaluators (comparators, data reducers and analyzers, defect/change trackers) Reengineering tools 6.8.2 Automatic Code Generation Automatic code generation is one aspect of CASE. It has the advantages of allowing the software to be designed at a higher level and then translated, without human error, into source code. The design becomes the source code. The downside to automatic code generation is that the tools are only now becoming mature. While human error is eliminated in the translation from design to code, tool error still exists. The correct translation from design to code must be verified for safety critical software. Keep in mind that in some environments, the source code may not be accessible. In addition, how well the code is optimized may affect performance or size criteria. Code can be automatically generated in several ways: Visual languages, such as LabVIEW, have the developer design the program graphically. The underlying source code is not visible (or accessible) to the programmer. Visual programming environments (e.g. Visual Basic) provide graphical programming, with access to the source code produced. Wizards automatically generate applications or parts of applications based on feedback about the desired features from the programmer. The wizards automatically generate code based on this feedback. 
Generating code from design models. These models usually use a set of diagrams that depict the structure, communication, and behavior of the system. The model may be supplemented with text-based specifications of system actions, such as computations. Design methodologies that can be used for code generation include the following. Not all tools or approaches will support all design modeling methodologies. Unified Modeling Language (UML) Object Modeling Technique (Rumbaugh) Object-Oriented Software Engineering (Jacobson) Object-Oriented Analysis and Design (Booch) Specification and Description Language (SDL) Real-time Object-Oriented Method (ROOM) Object-Oriented Analysis (OOA Shlaer and Mellor) Harel's hierarchical statecharts 6.8.2.1 Visual Languages A visual language is one that uses a visual syntax, such as pictures or forms, to express programs. Text can be part of a visual syntax as well. LabVIEW by National Instruments, VEE by Hewlett Packard, and PowerBuilder (Austin Software Foundry) are examples of visual languages. Visual languages are wonderful for prototyping applications, especially when the user interface is important. The development can be participatory, with the users and developers sitting down at a machine and designing the application interface together. A problem with visual languages in safety critical applications is the inability to inspect the code. What happens between the graphical program creation and the operations of the program is a black box. In addition, little formal development is done when visual languages are used. Formal specifications are usually lacking or non-existent. Configuration control is often not considered, and configuration management tools may have problems with the visual representations (language). 
6.8.2.2 Visual Programming Environments A visual programming environment (VPE) uses a visual representation of the software and allows developers to create software through managing and manipulating objects on a visual palette. Examples are Visual Basic (Visual C++, and other visual languages) and Delphi (by Borland). A visual programming environment uses a graphical interface to allow the developer to construct the software. From the visual elements (often the user interface), code is generated in the appropriate language. The developer must hand-code the interactions between elements, and must hand-code all the guts of the program. This is very close to traditional programming, with the addition of easily creating graphical user interfaces. In fact, VPEs can be used to create regular programs without the fancy user interface, or to hand-code the user interface if desired. Since VPEs produce source code, it can be formally inspected and analysis tools can be used with it. However, since the code was not generated by the developers, it may not follow the style or coding standards of the development team. The source code may be difficult to follow or understand, and its relationship back to the graphical environment may not always be obvious. 6.8.2.3 Code Generation from Design Models Model-based code generation produces application source code automatically from graphical models (designs) of system behavior or architecture. One advantage of model-based development is to raise the level of abstraction at which developers can work. The design (model) becomes the program, and only the design has to be maintained. Code Generation from Object Models [3] discusses the various approaches to code generation for object-oriented systems, and gives some of the plusses and minuses of using each approach. In many ways, the move to model-based code generation parallels the move from assembly to high-level languages. Each move along the path is a step up the abstraction ladder. 
Each step frees the developer from some of the gritty details of programming. However, each step also brings with it challenges in verifying that the program is safe! The methodology and tools go hand in hand. Some tools support multiple design methodologies, some only support one. When choosing a methodology (and tool), consider: The suitability of the modeling language for representing the problem. (How good is the modeling methodology for your particular problem?) The sufficiency of modeling constructs for generating code (how much of the code can it generate, how much will have to be hand coded?) The maturity of translators for generating quality code (Have the translators been used for years, or created yesterday? How much analysis has been done to verify the software that is produced by the translators?) Tools for development tasks related to code generation (Does it integrate with the debugger?) Methodologies for employing code generation effectively (What method does the tool use to translate your design into code?) The selection of tools and methods appropriate to the application (What's the right method for the problem? What's the right tool for the development environment? Do they match (best tool works with best methodology)?) The language the tool produces source code in (Is it Ada? C++?). For object-oriented systems, there are three approaches to model-based code generation: The Structural approach is based on state machines. It can create an object framework from the model design. Dynamic behavior and object communication is added by the programmer (hand coded). This includes hard deadlines and interrupt handling. This approach is used with UML and the Rumbaugh, Jacobson, and Booch OO design techniques. Most tool vendors support this approach. The tools usually can be used to reverse engineer existing code as well. The Behavioral approach is based on state machines augmented with action specifications. 
It includes both static and dynamic behavior (state transitions). Specification and Description Language (SDL telecommunications standard) and UML (among other methods) support this approach. What needs to be hand coded are event handlers and performance optimizations. Developers must adopt a state-machine view of the system functionality in addition to an object view of the system structure. Because the behavior is fully specified, a simulated model for test/debug can be created prior to code completion. The Translative approach is based on application and architecture models that are independent of one another. The application model uses Object-Oriented Analysis (OOA) by Shlaer and Mellor. This approach can simulate the system before developing code (same as behavioral). The architecture model is a complete set of translation rules that map OOA constructs onto source code and implementation (run-time) mechanisms. Some tools support other design methodologies. Structured analysis and design can be used to create code frames for the system structure. The frames have to be fleshed out by the developer, however. Also, data flow diagrams can be used by several tools. One can produce code for Digital Signal Processors (DSP). The code generated implements the flow of the system. Processing steps are either hand-coded or standard library routines. Data flow diagrams are used in specific application tools for control systems, instruments (e.g. LabVIEW), and parallel data processing. In an ideal world, the CASE tool would be certified to some standard, and the code generated by it would be accepted without review, in the same way that the object code produced by a compiler is often accepted. However, even compilers occasionally produce errors in the object code, and automatically generated code is still in its infancy. 
When the code is safety critical, or resides in an unprotected partition with safety critical code, the automatically generated code should be subjected to the same rigorous inspection, analysis, and test as hand-generated code. 6.9 Software Configuration Management Software configuration management (SCM) is often considered a part of project management and not software development or testing. It is a vital part of the development process, however, that should not be overlooked. It is very unlikely that you can produce safe software without it. You certainly cannot convince the Quality/Product Assurance/Safety personnel that the software is safe if you havent implemented SCM! Software Configuration Management (SCM) is much more than just version control of source code. It is a process to maintain and monitor the software development process as well. SCM includes: Identification: an identification scheme is needed to reflect the structure of the product. This involves identifying the structure and kinds of components, making them unique and accessible in some form by giving each component a name, a version identification, and a configuration identification. Control: controlling the release of a product and changes to it throughout the lifecycle by having controls in place that ensure consistent software via the creation of a baseline product. Status Accounting: recording and reporting the status of components and change requests, and gathering vital statistics about components in the product. Audit and review: validating the completeness of a product and maintaining consistency among the components by ensuring that components are in an appropriate state throughout the entire project life cycle and that the product is a well defined collection of components. Often, project documentation (specifications, plans, etc.) are maintained by one person, and the source code version control is handled by the programmers. 
This is not a good idea unless the tools used are well integrated, and someone has control of the process. Having everything accessible in one location facilitates the status accounting and audit/review process. Having someone else in control of source code may force developers to document the changes completely. Simply having an outside eye on the process improves the chance of finding errors or potential problems (two developers working on the same module but for different change requests, for example). Software Configuration Management is usually performed using a tool (program). However, a file or folder should be maintained, to collect information that is not in electronic form. This information could include the design notes scribbled on a napkin or a fax that only exists in hard-copy. The point is to collect all pertinent information in one place. It would be good if the information is cataloged in the electronic SCM system, so that it can be found again when needed. 6.9.1 Change control Change control is an important part of developing safe software. Arbitrary changes, because a developer thought they would be more efficient, for example, should be avoided. Once a piece of software has reached a level of maturity, it should be subject to a formal change control process. What that level of maturity is will vary by group. It could be when the module compiles, when the CSCI (which may contain several modules) is completed, or when the whole program is at its first baseline. Formal change control usually includes a form to request a change (Software Change Request, Engineering Change Request, etc.). The form is filled out by the developer, the customer, or someone else involved in the project. The form should include both what should be changed and why. A Change Control Board (also called an Engineering Review Board, and by other names) is convened to evaluate the change request. 
The board consists of several people, including someone from Software Quality Assurance. When safety is an issue, someone from safety or risk management should also be included on the board. The requestor may be at the CCB meeting, or the board may just evaluate the submitted form. The board may approve the change, reject it, combine it with other requests, or suggest a modification. Another way software changes occur is through a problem reporting/corrective action (PRACA) process. A PRACA is issued during the operation of the software, usually during testing. If the software isnt operating as it should, a PRACA is written. The problem report goes to the developers, who must find out what the problem is. If the fix to the problem involves a change to the software, it must go through the Change Control Board. All the paperwork from the Change Control process should be archived in the configuration management system. This includes software requests, PRACAs, notes from CCB meetings, and any other pertinent information. The configuration management system provides a repository for storing this data for later retrieval. In addition, a cross-index should be created between software changes, requirements, code module versions, and tests. This could be a database, a spreadsheet, or some other format. Being able to know what modules a software change affected impacts what tests need to be run. The change may also indicate that a requirement changed, and that the software requirements document needs to be updated. 6.9.2 Versioning Versioning is the part of software configuration management that most people think of first. It involves archiving the source code, keeping previous versions when a new version is added to the SCM tool. Sometimes a complete previous version is kept, other tools use a delta (difference) from the previous version to the new version. Each module will have a version number associated with it. 
A release will consist of all the modules and their associated version numbers. Some SCM tools allow branching, where a release will go down two or more paths (perhaps a freeware version and a complete version, for example). Versioning keeps the changes straight, and also allows roll back to previous versions if a bug is found down the road. Most SCM tools also have a check-in/check-out policy, to prevent changes by multiple programmers on the same module. Some will allow only one programmer to work on the module at one time. Other SCM tools will do a merge when multiple developers check in the same module. One weakness of many SCM tools is that the programmer can get away without good documentation on what changes were made and why. The tool keeps the changes, but the reasoning behind it usually is added as a comment upon check-in of the module. Some tools force the developer to say something, but not necessarily something useful! At a minimum, when a module is changed the following should be done: Clearly identify the area of code that is changed (within the source code). Use a comment with some character string (such as *****) that is easy to spot when flipping through the source code. Identify the end of the changed area the same way. Have a header at the top of the altered code that includes why the change occurred (change request, PRACA, etc.), what was changed (in English, not code-ese), when it was changed, and by whom. Include the what/when/why/who information in the module check-in comment. This information can be extracted for status accounting (see below). 6.9.3 Status Accounting According to MIL-STD-482A, configuration status accounting is The recording and reporting of the information that is needed to manage configuration effectively, including a listing of the approved configuration identification, the status of proposed changes to configuration, and the implementation status of approved changes. 
Status Accounting answers the question "how completely ready is the software?". Decide what stages of incompleteness, correctness and obsoleteness need to be known about each item and to what audience, give each stage a status name (e.g. draft, under review, ready for integration/delivery, operational, superseded), and collect the status of each item. Collate the information into a human-understandable format. Part of status accounting is the ability to create reports that show the status of each document (version, whether it is checked-out, when it was last updated, who made the changes, and what was changed). The status of change requests and PRACAs is also included in status accounting. While the status information can be compiled by hand, it can be a tedious process. Many tools exist that provide an integrated configuration management system for all kinds of documents, including source code, and that can generate the status reports when requested. Some of these tools are free or low-priced. The configuration management system needs to be audited occasionally. The audit can be a formal affair, or an informal look at the system by someone other than the configuration manager, such as Software Product Assurance or Software QA. The purpose of the audit is to verify that what the status accounting says about the project is actually true, and to look for holes in the process that can lead to problems in the future. 6.9.4 Defect Tracking Defect (bug) tracking is sometimes handled outside of Software Configuration Management. However, integrating it with the SCM process facilitates the "keep it all in one place" philosophy. When it's the middle of the night and you're trying to find information on a bug you thought you had killed a week ago, you'll appreciate a well-ordered system. Defect tracking has several purposes. One is to record all the defects for future reference. 
This can be simply for historical purposes, or to have something to reference when you think you've seen this defect before. Having defect information from previous projects can be a big plus when debugging the next project. Recording the defects allows metrics to be determined. One of the easiest ways to judge whether a program is ready for serious safety testing is to measure its defect density — the number of defects per line of code. If testing has found the majority of defects, then the software is likely to be stable. Safety testing would put the software through its paces, usually by generating error conditions and verifying graceful behavior by the program. You don't want to stress the software until you're pretty sure most of the bugs have been found. To determine the defects per lines of code, you need to know two pieces of information, both of which can be extracted from a good configuration management system: lines of code and number of defects. You also need a history from other projects on defects/lines of code (from your projects, or general industry numbers). If the average defects/KLOC (thousand lines of code) is 6, and the software is 10,000 LOC, then about 60 defects exist in the software. If testing has only found 10, a lot more testing needs to be done. The software in the example has a high risk, because many more defects linger in the code. One question in defect tracking is whether to use bugs found during unit testing by developers. It would be best if those defects are documented. The developer can see if he has a tendency to a certain kind of bug. Other programmers can learn from the experience of the developer and avoid similar defects. And a better idea of the number of defects (per KLOC, per complexity, etc.) can be determined. Developers often resist documenting the unit test bugs, however. Make the process easy on them, and make sure management does not have access to the information for a specific developer! 
6.9.5 Metrics from your SCM system Keeping tabs on the various elements of your software development project can show when the project is getting into trouble (cost, schedule, cannot meet delivery date, etc.) and can aid in planning future projects. Items to track, if possible, are: Lines of code* for the project (total) Lines of code per module, average module size, distribution of sizes Complexity per module, average complexity, distribution of complexities Estimated and actual time to complete development (coding, unit testing, etc.) for a change request or PRACA Estimated and actual time to code a module. Estimated and actual time to unit test a module. Estimated and actual time for integration tests (black box) and system tests Number of defects found per test type (unit, integration, system, etc.) You may wish to categorize the defects for further breakdown. * Function points can be substituted for Lines of Code, or both numbers can be collected. From these raw inputs, you can determine (among other things): Number of defects per lines of code for your team/organization How good your estimations are for completion of a software change How much time it takes to unit test. Correlate it with the defects/LOC to see if you need to spend more, or less, time unit testing. How much time to estimate for the various development phases (design, coding, testing) for your next project. How much time it will take to update the software for a future change request. Where to put extra resources in testing. If you are finding the majority of your defects in system testing, spend more time in unit and integration testing to find the defects earlier. If you made a software development process change, the numbers may show how much of an improvement the change made. 6.9.6 What to include in your SCM system Documents and Plans (specifications, formal design documents, verification matrix, presentation packages, etc.) 
Design information (data flow charts, UML or object-oriented design products, inputs to automatic code generation programs, etc.) Also include any miscellaneous related information. Interface information (Interface Control Documents, flow charts, message formats, data formats, etc.) Source Code Test Cases/Scenarios Test scripts, for manual or automated testing Test Reports Defect lists (or defect database) Change requests Problem reports/corrective actions Information for metrics, such as lines of code, number of defects, estimated and actual start or completion dates, and estimated/actual time to complete a change. 6.10 Operating Systems 6.10.1 Types of operating systems Operating Systems (OS) are the software that runs the programmers applications. The OS is loaded when the system boots up. It may automatically load the application program(s), or provide a prompt to the user. Examples of operating systems are MS-DOS, Windows 9x/NT, and Macintosh OS 9. Not all systems use an operating system. Small, embedded systems may load the application program directly. Or a simple scheduler program may take the place of the OS, loading the appropriate application (task) and switching between tasks. The types of operating systems are: No operating system. Just a boot loader (BIOS) program that loads the application directly and gives it control. The application deals with the computer hardware (processor) as well as any attached hardware, directly. This is sometimes used in small, embedded systems. Simple task scheduler. Usually written by the software developer. Acts as a mini-OS, switching between different applications (tasks). No other OS services provided. Embedded OS. This will be a fully-functional operating system, designed with small systems in mind. It will take a minimal amount of storage space (both RAM and disk). It provides task switching, some method for inter-task communications, shared resources, and other basic OS functions. Real-time Operating System. 
An RTOS is usually designed for embedded systems (small size), but this is not necessary for it to be real-time. An RTOS has timing constraints it must meet. If it cannot respond to events (interrupts, task switches, completing a calculation, etc.) within a specific time, then the result is wrong. Soft real-time systems have some flexibility in the timing. Hard real-time systems have no flexibility for critical deadlines. Regular Operating Systems. These systems have no timing constraints and are designed for systems that do not have limited resources. Examples are the Windows variants that run on PCs, Macintoshs OS 9, and main-frame operating systems. 6.10.2 Do I really need a real-time operating system (RTOS)? If an operating system is selected for use in the safety critical system, it will most likely be an RTOS. Even if the timing aspects arent important, most non-real-time operating systems are not designed for safety critical environments. What may be acceptable on your desktop (program freezes, frequent rebooting, or the blue screen of death) is not acceptable when safety is involved. The first question to answer is: Do I need an operating system? Small projects often use just a boot loader to boot up the system and load an application program. Get by without an RTOS by Michael Melkonian [2] describes a method that provides most operating system functionality. For small projects, such a system may be the best option. It avoids the overhead of having to learn an RTOS. And since commercial operating systems are COTS software, they would require extra analysis and testing in a safety critical application. Once you determine that you need (or want) an operating system, the next question is: build, reuse, or buy? Do you create your own operating system, reuse an existing, proprietary one, or purchase a commercial OS? If you have an existing OS that was used in safety critical applications before, or that has been thoroughly tested, it may be best to use that. 
Building your own OS is not an easy option. The advantage is that you can build in only what you need, eliminate options that might affect safety, and do formal development and/or thorough testing. For many systems, purchasing a commercial OS is the most cost-effective choice. This has the disadvantages associated with Off-The-Shelf software in general, but the advantages of time and money. The developers can spend time developing the applications, and not creating the operating system. 6.10.3 What to look for in an RTOS What makes an OS an RTOS? An RTOS (Real-Time Operating System) has to be multi-threaded and preemptible. It must support a scheduling method that guarantees response time, especially to critical tasks. Threads (tasks) must be able to be given a priority (static or dynamic). An alternative would be a deadline driven OS. The OS has to support predictable thread synchronization mechanisms (semaphores, etc.) A system of priority inheritance has to exist. OS behavior should be known. This includes the interrupt latency (i.e. time from interrupt to task run), the maximum time it takes for every system call, and the maximum time the OS and drivers mask the interrupts. The developer also needs to know the system interrupt levels and device driver parameters (IRQ levels, maximum time within a device IRQ, etc.). Every system is unique, and there is no simple universal set of criteria for selecting an operating system. Some commonly encountered issues to consider in the selection process are: Memory management: Operating systems which support a dedicated hardware MMU (Memory Management Unit) are superior from a safety viewpoint. An MMU guarantees protection of the designated memory space. In a multitasking application, it ensures the integrity of memory blocks dedicated to individual tasks by preventing tasks from writing into each others' memory space. It protects each task from errors such as bad pointers in other tasks. 
For small, single task systems (such as those running on a microcontroller), an MMU may not be needed. In such cases, even more than a minimal operating system may be overkill. Determinism: Determinism is the ability of the operating system to: Meet deadlines Minimize jitter (i.e. variations in time stamping instants, the difference between the actual and believed time instant of a sample) Remain steady under dynamic occurrences, e.g. off nominal occurrences, Bounding of priority inversion (time the inversion is in place). Priority inversion Priority inversion is a temporary state used to resolve priority conflicts. A low priority task is temporarily assigned a higher level priority to ensure its orderly completion prior to releasing a shared resource requested by a higher priority task. The priority change occurs when the higher priority task raises a "semaphore" flag. It is vital that the lower priority task releases its shared resource before delay of the higher priority task causes a system problem. The period of temporary increase of priority is called the "priority inversion" time. Priority inversion time can be defined by the application. Speed The context switching time is the most important speed issue for real-time operating systems. Context switching is how quickly a task can be saved, and the next task made ready to run. Other speed issues are the time it takes for a system call to complete. Interrupt latency Interrupt latency is how fast an interrupt can be serviced. Method of scheduling: The method of scheduling can be predetermined logical sequences (verified by Rate Monotonic Analysis). It can be priority-based preemptive scheduling in a multitasking environment (such as UNIX, Windows NT or OS2). Another method is "Round Robin" time slice scheduling at the same priority for all tasks. Cooperative scheduling can also be used, where the task keeps control until it completes, then relinquishes control to the scheduler. 
Cooperative scheduling may not be a good idea in a safety critical system, as a task can hog the processor, keeping the safety critical code from running. POSIX compliance(1003.1b/c). POSIX compliance is a standard used by many operating systems to permit transportability of applications between different operating systems. For single use software, or software that will never be used on other operating systems, this is not as important. Support for synchronization What support for synchronization and communication between tasks does the OS use? How much time does each method take? Support for tools Does the OS have support for tools such as debuggers, ICE (In Circuit Emulation) and multi-processor debugging. Consider also the ease of use, cost and availability of tools. Support for multiprocessor Does the OS support a multiprocessor configuration (multiple CPUs) if required. Language used to create the OS Consider the language in which the operating system kernel is written, using the same criteria as selecting application programming languages. Error handling in OS system calls How does the OS handle errors generated within system calls? What does the application need to check to verify that a system call operated correctly? Does the OS return an error code or can it access a user-created error handler? Safety Certifications Some operating systems go through levels of safety-related certification, often for use in medical or aviation applications. 6.10.4 Commonly used Operating Systems The following is a list of Operating Systems used in embedded or real-time systems. This is not a complete list, and nothing is meant by the ordering, inclusion, or exclusion of any OS. If the OS has been certified to any safety standard, that will be mentioned. VxWorks (Wind River Systems,  HYPERLINK "http://www.windriver.com/" http://www.windriver.com/) This is a popular RTOS with a tool-rich integrated development environment. 
There is a version of VxWorks certified to DO-178B, a standard used for aviation software. VxWorks is available for most higher-level processors. OSE ( HYPERLINK "http://www.enea.com/" http://www.enea.com) This operating system is certified to the safety standard IEC 61508. It is also being certified to DO-178B. OSE also provides an integrated development environment with many tools useful to embedded or real-time applications. OSE supports many processors, including DSPs. VRTX ( HYPERLINK "http://www.mentor.com/embedded/vrtxos/" http://www.mentor.com/embedded/vrtxos/) - VRTX is a a high-performance, modular, DO-178B-certifiable solution for PowerPC, ARM, 68K, CPU32, CPU32+, MCORE, and 80x86-based embedded systems. This RTOS is scalable, from a microkernel to a full-up operating system with POSIX, Java, priority inheritance, and other features. PSOSystem 3 ( HYPERLINK "http://www.windriver.com/products/html/psosystem3.html" http://www.windriver.com/products/html/psosystem3.html) This RTOS is now owned by Wind River. pSOSystem"! 3 is a modular, high-performance, memory protected, highly reliable real-time operating system, designed specifically for embedded microprocessors according to Wind River. pSOSystem supports the PowerPC and MIPS families of processors. QNX ( HYPERLINK "http://www.qnx.com/" http://www.qnx.com/) This RTOS supports x86 (80386 and higher) processors only. It uses a microkernel with minimal, required functionality that can be extended by dynamically plugging in service-providing processes. A free, non-commercial version is available for download and evaluation. CMX ( HYPERLINK "http://www.cmx.com/" http://www.cmx.com/) - CMX provides both a full-featured RTOS and a tiny one that runs on lower-level microcontrollers (with as little as 512 bytes of RAM). The RTOS supports a wide range of microprocessors. 
OS-9 ( HYPERLINK "http://www.microware.com/Products/Software/OS9.html" http://www.microware.com/Products/Software/OS9.html) According to Microware, OS-9 is a system-secure, fault-tolerant RTOS with high availability and reliability. Users can dynamically add and replace modules while the system is up and running. OS-9 supports many higher-level processors. AMX ( HYPERLINK "http://www.kadak.com/" http://www.kadak.com/) - This small, compact RTOS runs on x86, 68K family, Coldfire, ARM, PowerPC, and Z80 architectures. Depending on the processor, AMX can fit in 12K to 36K of ROM, with 2K to 4K of RAM needed. Besides the standard RTOS services, AMX claims rapid task context switching and fast interrupt response. Timing information is given on their website. LynxOS ( HYPERLINK "http://www.lynuxworks.com/products/whatislos.html" http://www.lynuxworks.com/products/whatislos.html) - LynxOS is a Linux-compatible real-time operating system that is a hard RTOS that combines performance, reliability, openness, and scalability together with patented technology for real-time event handling. It supports processors from Intel, Motorola, and MIPS. HYPERLINK ""  RTEMS ( HYPERLINK "http://www.rtems.com/" http://www.rtems.com/) RTEMS is a free, open source operating system. It was originally developed for the U.S. Army Missile Command. It contains all the basics of an RTOS, and the source code is available. RTEMS supports most commonly used processors. Linux ( HYPERLINK "http://www.linux.org" http://www.linux.org and  HYPERLINK "http://www.embedded-linux.org/" http://www.embedded-linux.org/) - Linux is the open source version of Unix. It is not normally a real-time operating system, but there are versions developed for embedded systems. In addition, there are extensions to Linux for real-time systems. Linux has been ported to many processors, though it is not usually available for cutting edge processors. 
You can roll your own version of Linux, creating a smaller system with only the elements you need. Windows NT/2000 ( HYPERLINK "http://www.microsoft.com" http://www.microsoft.com) - Windows NT (and its descendent, Windows 2000) are general purpose operating systems that have many real-time abilities. Out of the box, neither are hard real-time systems, but several companies provide extensions that meet the hard timing criteria. Windows NT/2000 are more robust than the desktop versions of Windows (95,98), with fewer memory leaks and greater memory protection. Windows CE ( HYPERLINK "http://www.microsoft.com" http://www.microsoft.com) - Microsoft describes Windows CE 3.0 as the modular, real-time, embedded operating system for small footprint and mobile 32-bit intelligent and connected devices that enables rich applications and services.. It supports many 32-bit microprocessors. It contains many real-time aspects, such as task priority, priority inheritance, and nested interrupts. It is not hard real-time, however. 6.11 Distributed Computing Having multiple processors working together may share the load of a complex calculation, or may distribute the appropriate part of a problem to a processor optimized for that particular calculation or control. Such multi-brained systems constitute a distributed computing system. Distributed systems can be defined as two or more independent processors, working together, and communicating across a medium that may have substantial transmission delays. Distributed computing is used infor many different purposes. In telecommunications systems, for For complex computational problems (parallel processor), and forproblems, parallel processors or clustered processors are used. Distributed computing is also used when high availability is required (continuing on if one processor has a hard failure), among other areas. 
Distributed systems may reside on the same processor board (multiprocessors), in the same system, or in widely separated areas. The processors in a distributed system usually use one of two main methods to communicate: shared memory or message passing. Shared memory systems have real or simulated RAM available to all the processors, and use this to communicate (pass values, signal use of resources, etc.). Shared memory is normally used in distributed systems that are physically compact (processors are near each other) and tightly coupled, such as multiprocessor systems. Shared memory allows large or complex data structures to be easily communicated between processes. Issues with shared memory distributed systems are Data consistency. The consistency of the data in shared memory (accuracy at any given moment) is a problem because of network latency. Most processes will cache the shared memory to improve performance. The value in Process As cache may be outdated, because process B has updated it, but delays may lead the update information to arrive after A has read the value. Various scenarios are implement to prevent this. Access synchronization. Distributed systems must also provide ways to prevent multiple processes from accessing the shared data at the same time. Usually a locking mechanism is used. Address space structure. A system may use a single shared distributed address space, where all the processes appear as threads within this space. The advantages is that objects appear at the same addresses on all nodes. However, security and protection are a major problem in such systems. Another approach divides each processs address space into fixed regions, some of which are shared and the rest are private. Shared data may or may not appear at the same address in each process. Fault tolerance. Distributed shared memory systems have some problems with fault tolerance. Most systems ignore it or maintain that it is an operating system issue. 
If one node that is sharing data with other processes fails, all the connected sites may fail as well. Message passing distributed systems communicate via a network, sending and receiving messages. Messages are blocks of data, usually wrapped in a protocol layer that contains information on the sender and recipient, time stamp, priority, and other parameters. A distributed system may pass messages synchronously or asynchronously. In synchronous message passing, the system has two phases: delivery and local computation. During the delivery phase, each process may send one or more messages to its immediate neighbors. The local computation phase encompasses receiving messages, state changes, and queuing messages to be sent during the next delivery phase. Asynchronous message passing distributed system do not have phases where messages are transmitted. Any process can send or receive messages at any time. Propagation delays are arbitrary, though in real systems they are often bounded (given a maximum delay value). The underlying network in a distributed system may or may not guarantee that all messages are eventually delivered to the correct recipient and that the messages will be without error or duplication. A distributed computing system may be fixed (known processors and processes that never change, except for failure) or dynamic. In a dynamic system, new processes can be added to the network and other processes can leave at arbitrary times. The protocols (message passing, usually) must adapt to the changing topology. Nodes (processes) in a distributed system can fail completely, intermittently (where the node operates correctly some of the time and at other times fails), or randomly (Byzantine). In the Byzantine failure mode, the node behaves arbitrarily, sending valid-looking messages inconsistent with the state it is in and the messages it has received. Failures can occur in the communications medium. Links between nodes can be broken. 
Intermittent problems may lead to the loss, garbling, reordering, or duplication of messages. Delays in message transfer may be interpreted as a lost message, or the data in the message, when it finally arrives, may be out of date.Breaking up software across (possibly) diverse multiple processors, communicating through some medium (serial line, network, etc.), creates a complex environment, full of potentially complex errors. Distributed systems have an inherent complexity resulting from the challenges of the latency of asynchronous communications, error recovery, service partitioning, and load balancing. Since distributed software is concurrent as well, it faces the possibility of race conditions, deadlock, and starvation problems. An excellent article that discusses the complexities of distributed computing is Distributed Software Design: Challenges and Solutions [13], whichdiscusses describes some problems inherent to distributed computing : Processing site failures. Each processor in a distributed system could fail. The developer must take this into account when building a fault-tolerant system. The failure must be detected, and the other processors must pick up the slack, which may involve reallocating the functionality among the remaining processors or switching to another mode of operation with limited functionality. Communication media failure (total loss, or loss between links). If the communication medium goes down, one or more processors are isolated from the others. Depending on how they are programmed, they may undertake conflicting activities. Communication media failure (intermittent). Intermittent failures include loss of messages, reordering of messages or data (arriving in a different order than when sent), and duplicating messages. They do not imply a hardware failure. Transmission delays. A delayed message may be misconstrued as a lost message, if it does not arrive before a timeout expires. 
Variable delays (jitter) make it hard to specify a timeout value that is neither too long nor too short. Delayed messages may also contain out-of-date information, and could lead to miscalculations or undesirable behavior if the message is acted on. Distributed agreement problems. Synchronization between the various processors poses a problem for distributed systems. It is even more difficult when failures (intermittent or complete) are present in the system. Impossibility result. It has been formally proven that it is not possible to guarantee that two or more distributed sites will reach agreement in finite time over an asynchronous communication medium, if the medium between them is lossy or if one of the distributed sites can fail. Heterogeneity. The processors and software involved in a distributed system are likely to be very different from each other. Integration of these heterogeneous nodes can create difficulties. System establishment. A major problem is how distributed sites find and synchronize with each other. [This is a placeholder for Distributed Systems. More needs to be added.]More information on distributed computing problems and solutions can be found at: 1) Distributed Software Design: Challenges and Solutions by Bran Selic, Embedded Systems Programming, Nov. 2000 2) FINITE STATE MACHINES IN DISTRIBUTED SYSTEMS, Class 307. Speaker: Knut Odman, Telelogic 3) Distrib. Syst. Eng. 3 (1996) 8695. Printed in the UK, Implementing configuration management policies for distributed applications, Gerald Krause y and Martin Zimmermann 6.12 Programmable Logic Devices Until recently, there was a reasonably clear distinction between hardware and software. Hardware was the pieces-parts: transistors, resistors, integrated circuits, etc. Software ran on the hardware (operating systems, applications programs) or resided inside the hardware (firmware). The design, construction, and testing process for hardware and software differed radically. 
Programmable logic devices (PLDs) blur the lines between hardware and software. Circuitry is developed in a programming language (such as VHDL or Verilog), run on a simulator, compiled, and downloaded to the programmable device. While the resulting device is hardware, the process of programming it is software. Some versions of programmable devices can even be changed on the fly as they are running. Programmable logic is loosely defined as a device with configurable logic and flip-flops, linked together with programmable interconnects. Memory cells control and define the function that the logic performs and how the logic functions are interconnected. PLDs come in a range of types and sizes, from Simple Programmable Logic Devices (SPLDs) to Field Programmable Gate Arrays (FPGAs). System safety normally includes hardware (electronic) safety. However, given the hybrid nature of programmable logic devices, software safety personnel should be included in the verification of these devices. Because PLDs are hardware equivalents, they should be able to be verified (tested) in a normal hardware way. However, because they are programmed devices, unused or unexpected interconnections may exist within the device as a result of software errors. These paths may not be tested, but could cause problems if accidentally invoked (via an error condition, single event upset, or other method). As the PLDs become more complex, they cannot be fully and completely tested. As with software, the process used to develop the PLD code becomes important as a way to give confidence that the device was programmed properly. The variety of programmable logic devices are described in the sections below. Guidance is given on the safety aspects of verifying each type of device. Frequently-Asked Questions (FAQ) About Programmable Logic [8] provides good introductory information on PLDs. An article in Embedded Systems programming [9] also gives a good introduction. 
6.12.1 Types of Programmable Logic Devices Simple Programmable Logic Devices (SPLDs) are the original Programmable Logic Devices. These are the smallest of the PLDs each can replace only a few logic chips. Inside a PLD is a set of macrocells, each of which are composed of some amount of logic (AND gate, for example) and a flip-flop. Each macrocell is fully connected. SPLD types include PAL (Programmable Array Logic), GAL (Generic Array Logic), PLA (Programmable Logic Array), and PLD (Programmable Logic Device). Complex Programmable Logic Devices (CPLDs) have a higher capacity than the SPLDs, typically equivalent to 2 to 64 SPLDs. The macrocells within a CPLD may not be fully connected, so not all theoretically possible designs may be a implementable in a particular CPLD. Varieties of CPLDs include EPLD (Erasable Programmable Logic Device), PEEL, EEPLD (Electrically-Erasable Programmable Logic Device) and MAX (Multiple Array matrix). Field Programmable Gate Arrays (FPGAs) have an internal array of logic blocks, surrounded by a ring of programmable input/output blocks, connected together via programmable interconnects. These devices are more flexible than CPLDs, but may be slower for some applications because that flexibility leads to slightly longer delays within the logic. 6.12.2 Program Once Devices Program once devices require an external programming mechanism and cannot be reprogrammed, once inserted on the electronics board. Included in this category are erasable/reprogrammable devices and on-the-board reprogrammable devices where the ability to reprogram is removed or not implemented, as well as true write once devices. Simple Programmable Logic Devices (SPLDs) nearly always are program once. Depending on the underlying process technology used to create the devices, CPLDs and FPGAs may be program once, field reprogrammable, or fully configurable under operating conditions. 
With program once devices, safety only needs to be concerned with the resulting final chip. Once the device is verified, it will not be changed during operations. Simple Programmable Logic Devices (SPLDs) are fairly simple devices. Dont worry about the development process, just test as if they were regular electronic devices. Treat them as hardware. Complex Programmable Logic Devices (CPLDs) and Field Programmable Gate Arrays (FPGAs) are complex enough that unexpected connections, unused but included logic, or other problems could be present. Besides a complete test program that exercises all the inputs/outputs of the devices, the software should be developed according to the same process used for regular software, tailored to the safety-criticality of the device/system. Requirements, design, code, and test processes should be planned, documented, and reviewed. For full safety effort, analyses should be performed on the documents from each stage of development. 6.12.3 Reprogram in the Field Devices Both Complex Programmable Logic Devices (CPLDs) and Field Programmable Gate Arrays (FPGAs) come in a in-the-field-programmable variety. The internals of these devices is based on EEPROM (Electrically-Erasable Programmable Read Only Memory) and FLASH technology. If the circuitry is present on the board (and implemented within the chip), then these devices can be reprogrammed while on their electronics board. This is not on-the-fly reprogramming while in operation. Reprogramming erases what was there and totally replaces what was in the chip. Once scenario that might be used is to hook up the CPLD/FPGA reprogramming circuitry to an external port, such as a serial port. During development, an external computer (laptop, etc.) is connected to the port, and the device is reprogrammed. When no computer is connected, the device cannot be reprogrammed. 
This scenario allows for changes in the device during development or testing, without having to physically disassemble the instrument and remove the device from the electronics board. Another scenario could be that a new CPLD/FPGA program is sent to a microprocessor in the system, which would then reprogram the CPLD/FPGA. The ability to do this would have to be included in the microprocessors software, as well as the physical circuitry being present. This scenario would allow the device to be reprogrammed in an environment where physical connection is impossible, such as in orbit around Earth. When the device can only be reprogrammed by making a physical connection, it is relatively safe during operation. A software error in the main computer (processor) code, or a bad command sent by a human operator, is not going to lead to the unexpected reprogramming of the device. The main concern is that reprogramming invalidates all or most of the testing that has gone before. The later the reprogramming is done in the system development cycle, the riskier it is. A set of regression tests should be run whenever the device is reprogrammed, once the instrument is out of the development phase. If the device can be reprogrammed by an in-system processor, then the possibility exists that it could accidentally be reprogrammed. If the device is involved in a hazard control or can cause a hazardous condition, this could be very dangerous. The legitimate command to reprogram the device would be considered a hazardous command. Commanding in general would have to be looked at closely, and safeguards put in place to make sure the reprogramming is not accidentally commanded. Other checks should be put in place to make sure that software errors do not lead to the unintentional reprogramming of the device. 6.12.4 Configurable Computing Some FPGAs (and CPLDs) use SRAM (Static RAM) technology inside. These SRAM-based devices are inherently re-programmable, even in-system. 
However, they require some form of external configuration memory source on power-up. The configuration memory holds the program that defines how each of the logic blocks functions, which I/O blocks are inputs and outputs, and how the blocks are interconnected together. The device either self-loads its configuration memory or an external processor downloads the memory into the device. The configuration time is typically less than 200 ms, depending on the device size and configuration method. The ability to change the internal chip logic on the fly can be very useful in some applications, such as pattern matching, encryption, and high-speed computing. Configurable computings key feature is the ability to perform computations in hardware to increase performance, while retaining much of the flexibility of a software solution. Applications that benefit the most from configurable computing solutions are those with extremely high I/O data rate requirements, repetitive operations on huge data sets, a large number of computational operations per input data point, or with a need to adapt quickly to a changing environment. One strength of configurable computing machines (CCMs) is their inherent fault tolerance. By adding structures to detect faults, the hardware can be reconfigured to bypass the faults without having to shut down the instrument. The downside of flexibility, however, is the difficulty in verifying the functionality and safety of a configurable system. When you can change the hardware in the middle of an operation, how do you assure its safety? That question has not been well addressed yet, as configurable computing is still a new concept. However, if your design uses CCMs, then consider very carefully how to test and verify them, as well as how to guard against internal and external errors or problems. An article by Villasenor and Mangione-Smith [10] discusses various aspects of configurable computing. 
6.12.5 Safety and Programmable Logic Devices IEC 1131-3 is the international standard for programmable logic controller (PLC) programming languages. As such, it specifies the syntax, semantics and display for the following suite of PLC programming languages: Ladder diagram (LD) Sequential Function Charts (SFC) Function Block Diagram (FBD) Structured Text (ST) Instruction List (IL) However, IEC 1131-3 does not address safety issues in programming PLCs. The SEMSPLC project was developed to address those issues. They have issued SEMSPLC Guidelines: safety-related application software for programmable logic controllers [11], available from the Institution of Electrical Engineers. Language choice for the PLC should meet with the standard IEC 1131-3. Coding standards should be created. In addition, the language should conform to the following criteria, if possible: Closeness to application domain Definition/standardization Modular Readable/understandable Traceability Checkable Analyzable Deterministic Standard software engineering practices should be used in the creation of PLC software. This includes requirements specification, design documentation, implementation, and rigorous testing. For safety-critical systems, Formal Inspections should be used on the products from the various stages, in particular on the requirements. PLC development can be very formal, up to and including using Formal Methods. However, tailoring of many of the techniques for PLC development has not been done. This an emerging field that requires much more study, to determine the best development practices and the best safety verification techniques. Besides the SEMSPLC guidelines [11], some general guidelines for better (and safer) PLC programming are: Spaghetti code results in spaghetti logic. Create a coding style/standard and stick to it. The better your code is, the faster or smaller the resulting logic will be! Keep it under 85. Dont use more than 85% of the available resources. 
This makes it easier to place-and-route the design and allows room for future additions or changes. Modularize. As much as possible, use modules in your PLC software. This helps avoid spaghetti code, and aids in testing and debugging. Use black-and-white testing. Use both black-box and white-box testing. Verify proper response to inputs (black-box), and also that all paths are executed (white-box), for example. Research safety techniques for PLC application. Work is being done to see which software engineering techniques are useful for PLC applications. Work by the SEMSPLC project has shown that control-flow analysis produces little information on the errors, but mutation analysis and symbolic execution have revealed a number of complex errors. [12] 6.13 Embedded Web Technology Everything is connected to everything elsethrough the Internet, or so it seems. Once the realm of academics sharing research data and ideas, the Internet (and its multimedia child, the World Wide Web) is now the medium for information exchange, conversation, and connectivity. From the Embedded Web Technology site at the NASA Glenn Research Center ( HYPERLINK "http://vic.lerc.nasa.gov/" http://vic.lerc.nasa.gov/): Embedded Web Technology (EWT) is the application of software developed for the World Wide Web to embedded systems. Embedded systems contain computers, software, input sensors and output actuators all of which are dedicated to the control of a specific device. Examples of devices with embedded systems include cars, household appliances, industrial machinery, and NASA Space Experiments. EWT allows a user with a computer and Web browser to monitor and/or control a remote device with an embedded system over the Internet using a convenient, graphical user interface. Many embedded devices are now including web servers and network hardware for communications with the outside world, instead of (or in addition to) serial or parallel ports. 
Some devices (Internet appliances) have no user interface hardware (keyboard, monitor). The user connects through any computer with a web browser to interact with the appliance. Embedded web servers allow remote access to the instrument (hardware) from nearly anywhere in the world. Instruments can operate as distributed systems, with a central processor and various microcontrollers, communicating back and forth via a network. In the same way that multi-tasking operating systems break up the application software into various independent tasks, a distributed system breaks up the tasks and runs them on specialized or remote processors. A distributed instrument will have the same problems as described in  HYPERLINK \l "_6.11_Distributed_Computing" Section 6.11 Distributed Computing. 6.13.1 Embedded Web Servers Most web server software is designed for desktop systems, with a keyboard, monitor, file system, and large hard-disk. For embedded systems, the web server needs to be scaled down, as well as addressing some embedded-specific issues. Reducing the memory footprint, increasing efficiency and reliability, and source portability are important in the embedded world. The requirements for an embedded web server include [14]: Memory usage. A small memory footprint (amount of memory used, including code, stack, and heap) is a very important requirement for an embedded web server. Memory fragmentation is also important, as frequently creating and destroying data (such as web pages) may create a myriad of tiny memory blocks that are useless when a larger memory block must be allocated. If the embedded software does not provide memory defragmentation, then embedded web servers should use only statically allocated or pre-allocated memory blocks. Support for dynamic page generation. Most of the HTML pages produced by embedded systems are generated on the fly. 
An embedded device will have only a few pages in memory and will often generate part or all of their content in real-time. The current status of the device, sensor values, or other information may be displayed on the dynamically generated page. Software integration. Without source code, integrating the web server with the embedded operating system and applications code may be difficult or impossible. When source code is available, ease of integration (and good documentation!) are still important factors to consider. ROMable web pages. Embedded systems without disk drives often store their data in ROM (or flash memory), sometimes within the executable file, and sometimes external to it. The ability to pull out an HTML file or other data from the executable, or find it on the flash disk, is lacking in most desktop-based web servers. Portability. Nothing stays the same in the embedded world. Technology changes fast, and the processor or operating system used today may be obsolete tomorrow. The ability to port the web server to different processors or operating systems is important for long-term usage. 6.13.2 Testing Techniques Some aspects of standard web-site testing do not apply to embedded web servers. However, consider checking the following areas: Load Handling Capacity. What is the total data rate the server can provide? How many transactions per second can the server handle? Are these values in line with the expected usage? What happens when the limits are exceeded? User Interface. Even if the web pages are not meant for world-wide viewing, there is a customer or two who need to view the provided data. 
Review the generated web pages for clarity of communication (tone, language), accessibility (load time, easy to understand and follow links), consistency (look and feel, repeating themes), navigation (links obvious in intent and destination, standard way to move between pages), design (page length, hyperlinks), and visual presentation (use of color, easy on the eyes). Data age. Is there a way to know how fresh the data is? Is the data time-tagged? When the data refreshes, does it show up on the web page? Speed of page generation. Since most pages are generated on the fly in embedded web servers, the speed at which they are constructed is important. Can the user break the system? Check all user-input elements on the web page (buttons, etc.). Try combinations of them, strange sequences, etc. to see if the user can create problems with the web server. If you have a colleague who seems to break his web client on a regular basis, put him to work testing your system. Security testing. If you will not be on a private network, test the security provision in your web server. Can an unauthorized user get into the system? Link testing. If you provide links between pages, make sure they are all operational. HTML and XML validation. Is the HTML and/or XML standard? Will it work with all the browsers expected to interface with the web server? All browser versions? Control of instrumentation. If the embedded server provides a way to control instrumentation, test this thoroughly. Look for problems that might develop from missed commands, out of sequence commands, or invalid commands. Error handling. Does the web page handle invalid input in a graceful way? Do any scripts used anticipate and handle errors without crashing? Are run time handlers included? Do they work? 
Good sources for information on error handling and website testing are: Handling and Avoiding Web Page Errors Part 1: The Basics (and parts 2 and 3 as well)  HYPERLINK "http://msdn.microsoft.com/workshop/author/script/weberrors.asp" http://msdn.microsoft.com/workshop/author/script/weberrors.asp WebSite Testing,  HYPERLINK "http://www.soft.com/eValid/Technology/White.Papers/website.testing.html" http://www.soft.com/eValid/Technology/White.Papers/website.testing.html Beyond Broken Links,  HYPERLINK "http://www.dbmsmag.com/9707i03.html" http://www.dbmsmag.com/9707i03.html WebSite Performance Analysis,  HYPERLINK "http://solo.dc3.com/white/wsperf.html" http://solo.dc3.com/white/wsperf.html User Testing Techniques A Reader-Friendliness Checklist  HYPERLINK "http://www.pantos.org/35317.html" http://www.pantos.org/35317.html 6.14 AI and Autonomous Systems Artificial Intelligence (AI) and Autonomous Systems reside on the cutting edge of software technology. They are two separate entities that, combined, have the potential to create systems that can operate in changing environments without human control. Space exploration, particularly in environments far from Earth, where human intervention would come far too late, is an ideal use for Intelligent Autonomous Systems. Artificial Intelligence encompasses any system where the software must think like a human. This involves information gathering, information pattern recognition, planning, decision making, and execution of the decision. That's a lot for a software system to do! Various aspects of AI include: Game Playing. Games such as chess or checkers. Expert Systems. Systems that capture a large body of information about a domain to answer a question posed to them. Diagnosing a disease based on symptoms is one example of an expert system. Agents. 
A computational entity which acts on behalf of other (most often human) entities in an autonomous fashion, performs its actions with proactivity and/or reactiveness and exhibits some level of learning, co-operation and mobility. For example, an agent may perform independent searches for information, on the Internet or other sources, based on subjects needed for an upcoming technical meeting you will be attending. Natural Language. Understanding and processing natural human languages. Neural Networks. Connecting the information nodes in ways similar to the connections within an animal brain. Neural nets learn with repetitive exercising. Robotics. Controlling machines that can see or hear (via sensors) and react to their environment and input stimuli. AI robots have a thinking capability, unlike factory robotics that perform specific functions as programmed. Whereas several versions of AI can exist independent of hardware (e.g. on a desktop computer), autonomous systems almost always control real-world systems. A robot that operates without human intervention, except for the issuance of orders (clean the first floor on Tuesday night, the second and third on Wednesday, etc.) is an example of an autonomous system. One definition for an autonomous system is a combination of a computational core, sensors and motors, a finite store for energy, and a suited control allowing, roughly speaking, for flexible stand-alone operation.[21] This section focuses on Intelligent Autonomous Systems that control hardware systems capable of causing hazards. As a technology on the cutting edge, methods to design, code, test, and verify such systems are not well known or understood. The issue of assuring the safety of such systems is being researched, but the surface has barely been scratched. Hopefully, much more will be learned in the coming years about creating and verifying safety critical Intelligent Autonomous Systems. 
In the future, when you travel to Jupiter in cryogenic sleep, with an Intelligent Autonomous System operating the spacecraft and watching your vital signs, you want it to operate correctly and safely. HAL 9000 needed a bit more software verification! 6.14.1 Examples of Intelligent Autonomous Systems (IAS) Intelligent spacecraft are one promising application of IAS. In the past, the on-board software had some built-in intelligence and autonomy in responding to problems, but planning for both the mission and any failures was performed by humans back on Earth. As we send probes out into the far reaches of the solar system, where communications lag time is measured in hours, having a spacecraft that can think for itself would be useful. Even for nearby objects, such as Mars, the communications lag time is enough to cause problems in a rover moving at speed over a varied terrain. Removing the human from the details of operation can increase the amount of science returned, as intelligent spacecraft and robots no longer have to wait for responses from Earth whenever a problem is encountered. The Deep Space 1 mission focused on technology validation, and contained an experiment in Intelligent Autonomous Systems. Called Remote Agent, it actually controlled the spacecraft for several days, responding to simulated faults. This experiment is described and discussed in  HYPERLINK \l "_6.14.3_Case_Study" Section 6.14.3 Case Study. Back down to Earth, cleaning office buildings is a monotonous, dirty, dull and low-esteem task that does not use the higher faculties of human intelligence. Intelligent mobile cleaning robots are currently under development to automate the process [16], moving humans from grunts to supervisors. Fly-by-wire aircraft systems have replaced hydraulic control of the aircraft with computer control (via wire to electromechanical hardware that moves the parts or surfaces). 
The computer keeps the aircraft stable and provides smoother motions than would be possible with a strictly mechanical system. Computers also provide information to the pilots, in the form of maps, trajectories, and aircraft status, among other items. At this time, most of the fly-by-wire systems are not intelligent. Humans still direct the systems, and usually have overrides if the system misbehaves. However, the trend is to move the human pilot farther from direct control of the aircraft, leaving the details to the computerized system. At some time in the future, fly-by-wire computers could control nearly all aircraft functions, with the pilot providing guidance (where the plane should go) and oversight in the case of a malfunction. Despite the fact that software used in aircraft is subjected to a stringent development process and thorough testing, an increasing number of accidents have computer problems as a contributing factor. In some cases, the computer displayed inaccurate information, which misled the flight crew. In others, the interface between the pilot and software was not well designed, leading to mistakes when under pressure. This points to the increased likelihood of aircraft accidents as computers and software become the pilots. Finding reliable methods for creating and verifying safe software must become a priority. The Intelligent Transportation System (ITS) is being researched and developed under the direction of the US Department of Transportation ( HYPERLINK "http://www.its.dot.gov/" http://www.its.dot.gov/). Major elements of ITS include: Advanced Traffic Management Systems (ATMS) which monitor traffic flow and provide decision support to reduce congestion on highways. Advanced Traveler Information Systems (ATIS) which provide travelers with directions, route assistance and real-time information on route conditions. 
Automated Highway Systems (AHS) which support and replace human functions in the driving process. Intelligent Vehicle Initiative (IVI) which focuses efforts on developing vehicles with automated components. Advanced commercial vehicle Systems (ACS) which provide support for commercial vehicle operations including logistics. Software safety will obviously be important in developing ITS, though information on how it will be implemented has been difficult to come by. The DOT document on Software Acquisition for the ITS, which consists of over 250 pages, devotes only 4 pages to Software Safety issues. 6.14.2 Problems and Concerns Like distributed systems and other complex software technologies, verifying the safety of Intelligent Autonomous Systems poses a large problem. Remember that for NASA, safety means more than just injury or death. Safety refers to the vehicle and payload as well. So even though no one can be killed by your space probe, loss of the probe would be a safety issue! The complex interactions that occur between hardware and software must be considered for Intelligent Autonomous Systems, as for any software that controls hardware. In addition, the choices made by the software (plans and decisions based on past performance, current hardware status, and desired goals) form a subset of millions of possible paths the system may take. If that subset was known, it could be thoroughly tested, if not formally verified. The number of paths, and the complexities of the interactions between various software modules and the hardware, make complete testing or formal verification essentially impossible. Various areas of concern with Intelligent Autonomous Systems (IAS) are: Technology is more complicated and less mature. Intelligent Autonomous Systems are on the cutting edge of software technology. Sensitivity to the environment/context. 
Traditional flight software (and other complicated embedded software) was designed to be independent of the system environment or software context. When a command was received, it was executed, regardless of what the spacecraft was doing at the time (but within the safety/fault tolerance checks). Whether or not the command made sense was the responsibility of the humans who sent it. An IAS, on the other hand, must know what the environment and system context are when it generates a command. It must create a command appropriate to the system state, external environment, and software context. Increased Subsystem interactions. Traditional software systems strive for minimal interactions between subsystems. That allows each subsystem to be tested independently, with only a minimal integrated system testing. IAS subsystems, however, interact in multiple and complicated ways. This increases the number of system tests that must be performed to verify the system. Complexity. Intelligent Autonomous Systems are complex software. Increased complexity means increased errors at all levels of development — specification, design, coding, and testing. New software technology often stresses the ability of traditional verification and validation techniques to adequately authenticate the system. You can't formally specify and verify the system without tremendous effort, you cannot test every interaction, and there is no way to know for certain that every possible failure has been identified and tested! The state-of-the-art for Intelligent Autonomous System (IAS) verification has focused on two areas: Testing (primarily) and Formal Verification (Model Checking). Simmons, et. al. [20] discuss using  HYPERLINK \l "_4.2.4__Model" model checking for one subset of IAS: application-specific programs written in a specialized, highly-abstracted language, such as used by Remote Agent. 
The application programs are verified for internal correctness only, which includes checks for liveness, safety, etc. Testing issues with Remote Agent are discussed in  HYPERLINK \l "_6.14.3.2_Testing_and" 6.14.3.2. An additional testing strategy is described by Reinholtz and Patel [19]. They propose a four-pronged strategy, starting with  HYPERLINK \l "_4.2.3_Formal_Methods" formal specifications of correct system behavior. The software is tested against this specification, to verify correct operations. Transition zones (areas of change and interaction among the subsystems) are identified and explored to locate incorrect behavior. The fourth element of the strategy is to manage risk over the whole lifecycle. 6.14.3 Case Study Remote Agent on Deep Space 1 Remote Agent is an experiment in Intelligent Autonomous Systems. It was part of the NASA Deep Space 1 (DS-1) mission. The experiment was designed to answer the question Can a spacecraft function on its own nearly 120 million kilometers from Earth, without detailed instructions from the ground? Remote Agent was originally planned to have control of DS-1 for 6 hours (a confidence building experiment) and for 6 days. Due to various problems, the experiment was replanned for a 2 day period in May, 1999. During the experiment run, Remote Agent controlled the spacecraft and responded to various simulated problems, such as a malfunctioning spacecraft thruster. Remote Agent functioned very well, though not flawlessly, during its two day experiment. 6.14.3.1 Remote Agent Description Remote Agent (RA) is a model-based, reusable, artificial intelligence (AI) software system that enables goal-based spacecraft commanding and robust fault recovery[48]. To break that statement down into its component parts: Model based. A model is a general description of the behavior and structure of the component being controlled, such as a spacecraft, robot, or automobile. 
Each element of Remote Agent (RA) solves a problem by accepting goals, then using reasoning algorithms on the model to assemble a solution that meets the goals. Reusable. Parts of the Remote Agent were designed to be system independent and can be used in other systems without modification. Other aspects are system dependent, and would need modification before being used in a different system. Artificial Intelligence. Remote Agent thinks about the goals and how to reach them. Goal-based commanding. Instead of sending Remote Agent a sequence of commands (slew to this orientation, turn on camera at this time, begin taking pictures at this time, etc.), RA accepts goals such as For the next week, take pictures of the following asteroids, keeping the amount of fuel used under X. Goals may not be completely achievable (parts may conflict) and Remote Agent has to sort that out. Robust fault recovery. Remote Agent can plan around failures. For example, if one thruster has failed, it can compensate with other thrusters to achieve the same maneuver. The Remote Agent software system consists of 3 components: the Planner/Scheduler, the Executive, and the MIR (Mode Identification and Reconfiguration, also called Livingstone). The Planner/Scheduler (PS) generates the plans that Remote Agent uses to control the spacecraft. It uses the initial spacecraft state and a set of goals to create a set of high-level tasks to achieve those goals. PS uses its model of the spacecraft, including constraints on operations or sequence of operations, to generate the plan. The Executive requests plans from PS and executes them. It also requests/executes failure recoveries from MIR, executes goals and commands from human operators, manages system resources, configures system devices, provides system-level fault protection, and moves into safe-modes as necessary. Its a busy little program! The Mode Identification and Reconfiguration (MIR) element diagnoses problems and provides a recovery mechanism. 
MIR needs to know what is happening to all components of the spacecraft, so it eavesdrops on commands sent to the hardware by the Executive. Using the commands and sensor information, MIR determines the current state of the system, which is reported to the Executive. If failures occur, MIR provides a repair or workaround scenario that would allow the plan to continue execution. 6.14.3.2 Testing and Verification of Remote Agent The main problem in testing Remote Agent was that the number of possible execution paths through the software was on the order of millions. Unlike traditional spacecraft flight software, where a sequence of operations was uplinked after ground verification, Remote Agent had to think for itself, identifying problems and taking corrective action, in order to achieve the goals. It is impossible to test all these execution paths within the software, at least within the lifetime of the tester, if not the universe! For the Remote Agent experiment, a scenario-based verification strategy was augmented with model-based verification and validation [Smith et. al. 17]. The universe of possible inputs (goals, spacecraft state, device responses, timing, etc.) is partitioned into a manageable number of scenarios. Remote Agent is exercised on each scenario and its behavior is verified against the specifications. Going from millions or billions of possible tests down to a manageable number (200 to 300) entails adding risk. If you test everything, you know how the system will respond in any possible scenario. When you test a small subset, there is the risk that you missed something important — some scenario where the interactions among the subsystems are not what you expected. You must be able to have confidence that the tested scenarios imply success among the untested scenarios. The effectiveness of scenario-based testing depends largely on how well the scenarios cover the requirements. 
This means that not only is the requirement tested, but that the selected inputs for the tests give confidence that the requirement works for all other inputs. The Remote Agent experiment used a parameter-based approach to select the scenarios and inputs to use. Three methods were used to achieve good coverage while maintaining manageability: Abstracting parameter space to focus on relevant parameters and values. Parameters and parameter values were selected to focus on areas where the software was most sensitive. Equivalence classes were used to generalize from these inputs to a collection of comparable tests that would not be performed. Identifying independent regions of the parameter space. Areas where there are low or no interactions mean that fewer combinations of parameters/values must be tested. When there is strong interaction among parameters, more combinations must be tested. Using orthogonal arrays to generate minimal-sized test suites that cover the parameter combinations. Every parameter value and every pair of values appears in at least one test case. Every parameter value appears in about the same number of cases. One difficulty encountered during testing was that it was difficult to know what parameter value led to failure. To overcome this, a collection of test cases identical to the faulty one was generated, with each test identical except for one parameter. This allowed the value leading to the error to be identified. Another difficulty in any form of spacecraft software testing is the lack of high-fidelity test beds. The closer the test bed is to flight fidelity, the less time you will get on it. To deal with this issue, the Remote Agent experiment performed tests on highly abstract software (software that did not care about the underlying hardware) on low-fidelity test beds, and reserved the high-fidelity test beds for hardware-specific software and general system verification. 
Remote Agent automated the testing process, and in some cases warped the time, so that a multi-day test could be completed in hours. Software was also written to help with understanding the test results. These not only allowed more tests to be done in the limited period of time, but increased the chance of spotting errors. Reviewing log files is very tedious, and errors can be missed. Additional information on the verification and testing of the Remote Agent experiment can be found in Smith et. al. [17]. Bernard et. al. [18] discusses the testing strategy, as well as the in-flight validation of Remote Agent. 6.14.3.3 In-flight Validation: How well did it work? Even before flight, problems with some aspects of the testing strategy were noted. During the last four months before flight, after the formal testing phase had ended, a large number of new problems were discovered. Most of the problems related to the planning system operating correctly, but unable to find a plan within the time constraints. Several reasons were identified: Range of some parameters differed from those assumed for testing. Disappearance of slack time in going from the 6 day to 2 day scenario revealed brittleness in the Planner chronological backtracking search. The test generator only considered pair-wise interactions. Some problems depended on the specific values of 3 or more parameters. During the flight experiment, a problem developed with Remote Agent not terminating the Ion Propulsion System (IPS) thrusting as expected. Plan execution appeared to be blocked, but the Remote Agent and the spacecraft were both healthy. The cause was later identified as a missing critical section in the plan-execution code. This created a race condition between two Executive threads. If the wrong thread won, a deadlock condition would occur where each thread was waiting for an event from the other. 
This occurred in flight, though not on the ground, despite thousands of previous races during the ground testing. The following is drawn from the Remote Agent Lessons Learned: Basic system must be thoroughly validated with a comprehensive test plan as well as formal methods, where appropriate. Automatic code generation of interface code, telemetry, model interfaces, and test cases was enormously helpful. Better model validation tools are needed. Automated test running capability helped increase the number of off-nominal tests that could be run. However, manual evaluation of the test results was laborious. Confidence in complex autonomous behaviors can be built up from confidence in each individual component behavior. Ground tools need to be created early and used to test and understand how to operate the complex flight system. For Remote Agent, the ground tools were developed very late and many of them were not well integrated. Ensuring sufficient visibility into the executing software requires adequate information in the telemetry. Design the telemetry early and use it as the primary way of debugging and understanding the behavior of the system during integration, test, and operations. As the problems found in late ground operations and flight operations show, the testing strategy was not 100% successful. In particular, a timing problem that rarely occurred was missed because it never happened on the ground. More work needs to be done on the verification and validation of Intelligent Autonomous Systems, especially if they are to have control over safety critical functions and equipment. Remote Agent had backup from the flight software and hardware hazard controls. It was a successful experiment that shows promise for the future. But it is not quite ready for complete control of safety-critical systems. 
6.15 Good Programming Practices for Safety Besides all the practices discussed in the programming languages sections, there are some simple ways to make your software safer. The lists below come from various sources, which are referenced. In addition, they are summarized in a checklist in Appendix E. The following list is from Solving the Software Safety Paradox by Doug Brown [1]. See that article for more details. CPU self test. If the CPU becomes partially crippled, it is important for the software to know this. Cosmic Radiation, EMI, electrical discharge, shock, or other effects could have damaged the CPU. A CPU self-test, usually run at boot time, can verify correct operations of the processor. If the test fails, then the CPU is faulty, and the software can go to a safe state. Guarding against illegal jumps. Filling ROM or RAM with a known pattern, particularly a halt or illegal instruction, can prevent the program from operating after it jumps accidentally to unknown memory. On processors that provide traps for illegal instructions (or a similar exception mechanism), the trap vector could point to a process to put the system into a safe state. ROM tests. Prior to executing the software stored in ROM (EEPROM, Flash disk, etc.), it is important to verify its integrity. This is usually done at power-up, after the CPU self test, and before the software is loaded. However, if the system has the ability to alter its own programming (EEPROMS or flash memory), then the tests should be run periodically. Watchdog Timers. Usually implemented in hardware, a watchdog timer resets (reboots) the CPU if it is not tickled within a set period of time. Usually, in a process implemented as an infinite loop, the watchdog is written to once per loop. In multitasking operating systems, using a watchdog is more difficult. Do NOT use an interrupt to tickle the watchdog. 
This defeats the purpose of having one, since the interrupt could still be working while all the real processes are blocked! Guard against Variable Corruption. Storing multiple copies of critical variables, especially on different storage media or physically separate memory, is a simple method for verifying the variables. A comparison is done when the variable is used, using two-out-of-three voting if they do not agree, or using a default value if no two agree. Also, critical variables can be grouped, and a CRC used to verify they are not corrupted. Stack Checks. Checking the stack guards against stack overflow or corruption. By initializing the stack to a known pattern, a stack monitor function can be used to watch the amount of available stack space. When the stack margin shrinks to some predetermined limit, an error processing routine can be called, that fixes the problem or puts the system into a safe state. Program Calculation Checks. Simple checks can be used to give confidence in the results from calculations. 30 Pitfalls for Real-Time Software Developers, by David B. Stewart [4][5] discusses problems faced by real-time developers. Of the problems he considers, the following are especially applicable to safety and reliability: Delays implemented as empty loops. This can create problems (and timing difficulties) if the code is run on faster or slower machines, or even if recompiled with a newer, optimizing compiler. Interactive and incomplete test programs. Tests should be planned and scripted. This prevents tests from being missed. Also, functional tests should be run after a change, to make sure that the software change did not indirectly impact other code. Reusing code not designed for reuse. If the code was not designed for reuse, it may have interdependencies with other modules. Usually, it will not use abstract data types (if object-oriented) or have a well-defined interface. 
Code that is not designed for reuse will not be in the form of an abstract data type or object. The code may have interdependencies with other code, such that if all of it is taken, there is more code than needed. If only part is taken, it must be thoroughly dissected, which increases the risk of unknowingly cutting out something that is needed, or unexpectedly changing the functionality. One big loop. When real-time software is designed as a single big loop, we have no flexibility to modify the execution time of various parts of the code independently. Few real-time systems need to operate everything at the same rate. A single large loop forces all parts of the software to operate at the same rate. This is usually not desirable. No analysis of hardware peculiarities before starting software design. Different processors have peculiarities that can affect the time a calculation can take, or how long it takes to access an area of memory, for instance. Understanding the hardware before designing the software will decrease the number of gotchas at integration time. Fine-grain optimizing during first implementation. Some programmers foresee anomalies (some are real, some are mythical). An example of a mythical anomaly is that multiplication takes much longer than addition. Too many inter-module dependencies. To maximize software reusability, modules should not depend on each other in a complex way. Only a single design diagram. Most software systems are designed such that the entire system is defined by a single diagram (or, even worse, none!). When designing software, getting the entire design on paper is essential. Error detection and handling are an afterthought and implemented through trial and error. Design in the error detection and handling mechanisms from the start. Tailor the effort to the level of the code — don't put it everywhere! 
Look at critical locations where data needs to be right or areas where the software or hardware are especially vulnerable to bad input or output. No memory analysis. Check how much memory your system uses. Estimate it from your design, so that you can adjust the design if the system is bumping up against its limits. When trying to decide between two different implementations of the same concept, knowing the memory usage of each will help in making a decision. Documentation was written after implementation. Everyone knows that the system documentation for most applications is dismal. Many organizations make an effort to make sure that everything is documented, but documentation isn't always done at the right time. The problem is that documentation is often done after the code is written. Write what you need, and use what you write. Don't make unnecessarily verbose or lengthy documentation, unless contractually required. It is better to have short documents that the developers will actually read and use. Indiscriminate use of interrupts. Use of interrupts can cause priority inversion in real-time systems if not implemented carefully. Interrupts are perhaps the biggest cause of priority inversion in real-time systems, causing the system to not meet all of its timing requirements. The reason for this delay is that interrupts preempt everything else and aren't scheduled. This can lead to timing problems and the failure to meet necessary deadlines. No measurements of execution time. Many programmers who design real-time systems have no idea of the execution time of any part of their code. Bill Wood, in Software Risk Management for Medical Devices [15], Table III, gives a list of mitigation mechanisms for various possible failures. Some of the practices that are not duplicated in the lists above are summarized below (and expanded upon): Check variables for reasonableness before use. 
If the value is out of range, there is a problem — memory corruption, incorrect calculation, hardware problems (if sensor), etc. Use execution logging, with independent checking, to find software runaway, illegal functions, or out-of-sequence execution. If the software must follow a known path through the modules, a check log will uncover problems shortly after they occur. Come-from checks. For safety critical modules, make sure that the correct previous module called it, and that it was not called accidentally by a malfunctioning module. Test for memory leakage. Instrument the code and run it under load and stress tests. See just how the memory usage changes, and check it against the predicted usage. Use readbacks to check values. When a value is written to memory, the display, or hardware, another function should read it back and verify that the correct value was written. In addition to the suggestions above, consider doing the following: Use a simulator or ICE (In-circuit Emulator) system for debugging in embedded systems. These tools allow the programmer/tester to find some subtle problems more easily. Combined with some of the techniques described above, they can find memory access problems and trace back to the statement that generated the error. Reduce complexity. Calculate a complexity metric. Look at modules that are very complex and reduce them if possible. Complexity metrics can be very simple. One way to calculate McCabe's Cyclomatic Complexity is to add the number of decisions and add one. An if is a 1. A case/switch statement with 3 cases is 2. Add these up, add one. If the complexity is over 10, look at simplifying the routine. Design for weak coupling between modules (classes, etc.). The more independent the modules are, the less you can screw them up later in the process. Fixes when an error is found in testing may create problems because of misunderstood dependencies between modules. Consider the stability of the requirements. 
If the requirements are likely to change, design as much flexibility as possible into the system. Consider compiler optimization carefully. Debuggers may not work well with optimized code. It's hard to trace from the source code to the optimized object code. Optimization may change the way the programmer expected the code to operate (removing unused features that are actually used!). Be careful if using multi-threaded programs. Developing multi-threaded programs is notoriously difficult. Subtle program errors can result from unforeseen interactions among multiple threads. In addition, these errors can be very hard to reproduce since they often depend on the non-deterministic behavior of the scheduler and the environment. A dependency graph is a valuable software engineering aid. Given such a diagram, it's easy to identify what parts of the software can be reused, create a strategy for incremental testing of modules, and develop a method to limit error propagation through the entire system. Follow the two-person rule. At least two people should be thoroughly familiar with the design, code, testing and operation of each software module of the system. If one person leaves the project, someone else understands what is going on. Prohibit program patches. During development, patching a program is a bad idea. Make the changes in the code and recompile instead. During operations, patching may be a necessity, but should still be carefully considered. Keep Interface Control Documents up to date. Out-of-date information usually leads to one programmer creating a module or unit that will not interface correctly with another unit. The problem isn't found until late in the testing phase, when it is expensive to fix. Besides keeping the documentation up to date, use an agreed-upon method to inform everyone of the change. Create a list of possible hardware failures that may impact the software, if they are not spelled out in the software requirements document. 
Have the hardware and systems engineers review the list. The software must respond properly to these failures. The list will be invaluable when testing the error handling capabilities of the software. Having a list also makes explicit what the software can and cannot handle, and unvoiced assumptions will usually be discovered as the list is reviewed. The following programming suggestions are derived from SSP 50038, Computer-Based Control System Safety Requirements for the International Space Station Program: Provide separate authorization and separate control functions to initiate a critical or hazardous function. This includes separate arm and fire commands for critical capabilities. Do not use input/output ports for both critical and non-critical functions. Provide sufficient difference in addresses between critical I/O ports and non-critical I/O ports, such that a single address bit failure does not allow access to critical functions or ports. Make sure all interrupt priorities and responses are defined. All interrupts should be initialized to a return, if not used by the software. Provide for an orderly shutdown (or other acceptable response) upon the detection of unsafe conditions. The system can revert to a known, predictable, and safe condition upon detection of an anomaly. Provide for an orderly system shutdown as the result of a command shutdown, power interruptions, or other failures. Depending on the hazard, battery (or capacitor) backup may be required to implement the shutdown when there is a power failure. Protect against out-of-sequence transmission of safety-critical function messages by detecting any deviation from the normal sequence of transmission. Revert to a known safe state when out-of-sequence messages are detected. Initialize all unused memory locations to a pattern that, if executed as an instruction, will cause the system to revert to a known safe state. Hazardous sequences should not be initiated by a single keyboard entry. 
Prevent inadvertent entry into a critical routine. Detect such entry if it occurs, and revert to a known safe state. Don't use a stop or halt instruction. The CPU should be always executing, whether idling or actively processing. When possible, put safety-critical operational software instructions in nonvolatile read-only memory. Don't use scratch files for storing or transferring safety-critical information between computers or tasks within a computer. When safety interlocks are removed/bypassed for a test, the software should verify the reinstatement of the interlocks at the completion of the testing. Critical data communicated from one CPU to another should be verified prior to operational use. Set a dedicated status flag that is updated between each step of a hazardous operation. This provides positive feedback of the step within the operation, and confirmation that the previous steps have been correctly executed. Verify critical commands prior to transmission, and upon reception. It never hurts to check twice! Make sure all flags used are unique and single purpose. Put the majority of safety-critical decisions and algorithms in a single (or few) software development module(s). Decision logic using data from hardware or other software modules should not be based on values of all ones or all zeros. Use specific binary patterns to reduce the likelihood of malfunctioning hardware/software satisfying the decision logic. Safety-critical modules should have only one entry and one exit point. Perform reasonableness checks on all safety-critical inputs. Perform a status check of critical system elements prior to executing a potentially hazardous sequence. Always initialize the software into a known safe state. This implies making sure all variables are set to an initial value, and not the previous value prior to reset. Don't allow the operator to change safety-critical time limits in decision logic. 
When the system is safed, usually in response to an anomalous condition or problem, provide the current system configuration to the operator. Safety-critical routines should include come from checks to verify that they are being called from a valid program, task, or routine. 6.16 Wrapping it all up The job of creating safe software is not an easy one. It is a balance of risks and benefits. Sometimes the benefit is a personal choice (I like the way this editor works), meeting a contractual requirement (The software shall be written in Ada), producing software that meets size or timing restrictions, or creating the software within a given schedule. Each choice carries an associated level of risk of creating unsafe software. In an ideal world, all tools, programming languages, operating systems, etc. that meet your needs would also be designed for safe software. Such a world does not exist. Often you are lucky to find items that meet your needs without extensive modification! However, always keep the goal of producing safe software in your mind. 7. SOFTWARE ACQUISITION Acquiring software, whether off-the-shelf, previously created, or custom made, carries with it a set of risks and rewards that differ from those related to software development. When the software will serve a safety critical function, or be integrated with in-house developed safety critical code, it becomes very important to select carefully. This section provides guidance on both purchased off-the-shelf and reused software as well as software acquired from a contractor. Software safety is a concern with off-the-shelf (OTS), reused, and contract-developed software, and NASA safety standards apply to all types. 
NASA-STD-8719.13A, the Software Safety NASA Technical Standard, section 1.3, states (emphasis added): This standard is appropriate for application to software acquired or developed by NASA that is used as a part of a system that possesses the potential of directly or indirectly causing harm to humans or damage to property external to the system. When software is acquired by NASA, this standard applies to the level specified in contract clauses or memoranda of understanding. When software is developed by NASA, this standard applies to the level specified in the program plan, software management plan, or other controlling document. NASA Policy Directive NPD 2820.1, NASA Software Policies, includes consideration of COTS and GOTS software that is part of a NASA system. Projects need to evaluate whether the use of COTS and GOTS would be more advantageous than developing the software. It expects proof that software providers are capable of delivering products that meet the requirements. Off-the-shelf (OTS) software and reused software share many of the same benefits and concerns. They will be grouped together for convenience in  HYPERLINK \l "_7.1_Off-the-Shelf_Software" section 7.1. OTS or off-the-shelf software will refer to both off-the-shelf (usually commercial) software and reused software. When a comment refers only to one or the other, the appropriate form of the software will be clearly designated. Software developed under contract will be discussed in  HYPERLINK \l "_7.2_Contractor-developed_Software" section 7.2. For off-the-shelf software, this section discusses the following areas: Pros and Cons of OTS software What to look for when purchasing off-the-shelf software Using OTS in your system Recommended extra testing for OTS software For contract-developed software, guidance is provided on What to put in the contract regarding software development. 
What to monitor of contractor processes (insight/oversight) What testing is recommended 7.1 Off-the-Shelf Software The decision to use off-the-shelf (OTS) software in your system should not be made lightly. While it is becoming common to purchase software rather than create it, the process is not without pitfalls. Reusing software from other projects, even similar projects, is not a panacea either. Systems differ, and the subtle differences can lead to devastating results. Why is OTS software use becoming more commonplace in NASA and industry? Primarily, the prevailing wisdom is that it will save on cost and/or schedule. If a commercial software product can be purchased that meets the needs of the project, it is usually a less expensive alternative to developing the software in-house. Or the organization may have software from a similar project that can be reused in the new project. In addition, the OTS software is often available immediately, which helps in a tight schedule. In a project strapped for money or time, OTS software looks very attractive. However, there are risks involved in using OTS software. Some of the issues are discussed below, and reference [4] provides a method for determining the risks, as well as the cost/benefit ratio, for using COTS software in your system. Reference [2] discusses concerns as well as ways to make sure the OTS software meets your needs and is safe. Some OTS software is so common that most developers do not even consider it. Operating Systems (OS) are one example. It is very rare for a development team to create their own operating system, rather than purchasing a commercial one. Guidance on what to look for in an operating system is given in  HYPERLINK \l "_6.10__Operating" section 6.10 Operating Systems. Another example of common OTS software is language libraries, such as the C standard library. OTS software has acquired another name recently: SOUP (Software of Uncertain Pedigree). 
In many ways, SOUP is a better name, because it emphasizes the potential problems and pitfalls upfront. OTS software may be developed by a team that uses good software engineering practices, or by a few developers working late nights, living on pizza and soft drinks, and banging out some code. Knowing the pedigree of the OTS software can save you headaches down the road. Carefully consider all aspects of the OTS software under consideration. It is not an easy decision, choosing between creating the software in-house (with its accompanying headaches) or purchasing OTS software/reusing software (which have a different set of headaches). You must take a systems approach and consider how the OTS software will fit into your system. You must also perform an adequate analysis of the impacts of the OTS software. Don't wait until you are deep into implementation to find out that one of the extra functions in the OTS software can cause a safety hazard! Consider the following questions: Will you need glueware to connect the software to your system? How extensive will the glueware need to be? Will you have to add functionality via glueware because the OTS software doesn't provide all of it? Is there extra functionality in the OTS software that the rest of the system needs to be protected from? What extra analyses will you need to perform to verify the OTS software? What extra tests will you need to do? If the glueware is going to be a significant portion of the size of the OTS software, you may want to rethink the decision to use OTS. You don't save time or money if you have to create extensive wrappers, glueware, or other code to get the OTS software working in your system. Also, in a safety critical system, the cost of extra analyses and tests may make the OTS software a costly endeavor. In safety-critical systems, OTS software can be a burden as well as a blessing. The main problems with off-the-shelf software are: Inadequate documentation. 
Often only a user manual is provided, which describes functionality from a user point of view. In order to integrate the software into the system, more information is needed. In particular, information is required on how the software interacts within itself (between modules) and with the outside world (its application program interface (API)). Lack of access to source code, which precludes some safety analyses. It also precludes obtaining a better understanding of how the software actually works. Lack of knowledge about the software development process used to create the software. Lack of knowledge about the testing process used to verify the software. Concern that the OTS developers may not fully understand the interactions between elements of their system or may not communicate that information fully to the purchaser. Inadequate detail on defects, including known bugs not provided to purchaser. Inadequate or non-existent analyses performed on the software. Missing functionality. The OTS software may provide most but not all required functionality. This is one area where glueware is necessary. Extra functionality. The OTS software may contain functions that are not required. Sometimes these functions can be turned off, but unless the OTS software is recompiled, the code for these functions will remain in the system. Glueware (wrappers) may be needed to shield the rest of the system from this extra functionality. For further information on how OTS software is handled in other industries, check references [7] (FDA) and [8] (nuclear). They provide some high-level guidance on using COTS software in medical and nuclear applications, both of which are highly safety-critical venues. The Avionics Division of the Engineering Directorate at the NASA Lyndon B. 
Johnson Space Center (JSC) baselined a work instruction, EA-WI-018, Use of Off-the-Shelf Software in Flight Projects Work Instruction that outlines a lifecycle process for OTS software projects, including safety considerations. This work instruction is based partly on the FDAs process detailed in Guidance for Off-the-Shelf Software Use in Medical Devices. [??] The lifecycle described in the JSC work instruction coordinates the selection and integration of OTS software with the development and implementation of custom software. In comparing the lifecycle processes it is evident that the amount of time spent on each phase changes and the skills of the personnel need to be different. For selecting OTS products, a great deal of time is spent evaluating the functional needs of the project and the available OTS products on the market. Flexibility of requirements is needed with a clear idea of the overall system. A poor OTS selection can severely complicate or cripple a project. A series of questions are included in both the JSC work instruction and the FDA guidance document to give personnel enough information to determine whether or not to use a specific OTS software product. The work instruction specifies that an initial determination of the criticality of the function must be accomplished. The amount of scrutiny the candidate OTS software faces is based on the criticality assessed. Experienced personnel need to determine the criticality. The JSC work instruction and the FDA guidance document list similar requirements for high criticality OTS software. A project with life threatening hazards must do the first three items of the  HYPERLINK \l "_E.1_Checklist_for" Checklist for Off-the-Shelf (OTS) Items (second checklist) in Appendix E. Some of this section, and those that follow, on Off-the-Shelf software issues, especially within NASA, comes from a whitepaper by Frances E. Simmons of JSC [11]. 
7.1.1 Purchasing or Reusing OTS Software: Recommendations While all OTS software should be considered carefully, using OTS software in a safety critical system ups the ante. OTS software that directly performs a safety critical function is not the only element that must be considered. Any OTS software that resides on the same platform as the safety critical software must be analyzed, to verify that it cannot impact the safety critical code. Since there is no independent COTS certification authority to test the safety and reliability of the COTS software, all additional analyses and tests will have to be done by you. Using non-safety-critical OTS software on the same platform as safety-critical software is not recommended. Certain commercial programs are known to crash regularly. Do you really want to have Word on the same system that controls your air supply? If the OTS software provides a necessary function, then it must be considered in conjunction with the safety critical code. The hazard analysis should be updated (or at least reviewed) before you purchase the software. This guidebook gives an introduction to the good software development processes that go into safety critical software development. As much as possible, verify that the OTS software was created using good development processes. When purchasing OTS software, or deciding to reuse existing code, the following areas should also be considered: Does the OTS software fill the need in this system? Is its operational context compatible with the system under development? Consider not only the similarities between the system(s) the OTS was designed for and the current system, but also the differences. Look carefully at how those differences affect operation of the OTS software. How stable is the OTS product? Are bug-fixes or upgrades released so often that the product is in a constant state of flux? How responsive is the vendor to bug-fixes? 
Does the vendor inform you when a bug-fix patch or new version is available? How compatible are upgrades to the software? Has the API changed significantly between upgrades in the past? Will your interface to the OTS software still work, even after an upgrade? Will you have to update your glueware with each iteration? How cutting edge is the software technology? OTS software is often market driven, and may be released with bugs (known and unknown) in order to meet an imposed deadline or to beat the competition to market. Conversely, is the software so well known that it is assumed to be error free and correct? Think about operating systems and language libraries. In a safety critical system, you do not want to assume there are no errors in the software. What is the user base of the software? If it is a general use library, with thousands of users, you can expect that most bugs and errors will be found and reported to the vendor. Make sure the vendor keeps this information, and provides it to the users! Small software programs will have less of a shake down and may have more errors remaining. What level of documentation is provided with the software? Is there more information than just a users manual? Can more information be obtained from the vendor (free or for a reasonable price)? Is source code included, or available for purchase at a reasonable price? Will support still be provided if the source code is purchased or if the software is slightly modified? Can you communicate with those who developed the software, if serious questions arise? Is the technical support available, adequate, and reachable? Will the vendor talk with you if you modify the product? Will the vendor support older versions of the software, if you choose not to upgrade? Many vendors will only support the newest version, or perhaps one or two previous versions. 
Is there a well-defined API (Application Program Interface), ICD (interface control document), or similar documentation that details how the user interacts with the software? Are there undocumented API functions? What are the error codes returned by the software? How can it fail (return error code, throw an exception, etc.)? Do the functions check input variables for proper range, or is it the responsibility of the user to implement? Can you obtain information on the internals of the software, such as the complexity of the various software modules or the interfaces between the modules? This information may be needed, depending on what analyses need to be performed on the OTS software. Can you get information about the software development process used to create the software? Was it developed using an accepted standard (IEEE 12207, for example)? What was the size of the developer team? What types of testing was the software subjected to? How thorough was the testing? Can you get copies of any test reports? Are there any known defects in the software? Are there any unresolved problems with the software, especially if the problems were in systems similar to yours? Look at product support groups, newsgroups, and web sites for problems unreported by the vendor. However, also keep in mind the source of the information found on the web — some is excellent and documented, other information is spurious and incorrect. Were there any analyses performed on the software, in particular any of the analyses described in HYPERLINK \l "_5._SOFTWARE_SAFETY_2" section 5? Formal inspections or reviews of the code? How compatible is the software with your system (other software, both custom and OTS)? Will you have to write extensive glueware to interface it with your code? Are there any issues with integrating the software, such as linker incompatibility, protocol inconsistencies, or timing issues? Does the software provide all the functionality required? 
How easy is it to add any new functionality to the system, when the OTS software is integrated? Will the OTS software provide enough functionality to make it cost-effective? Does the OTS-to-system interface require any modification? For example, does the OTS produce output in the protocol used by the system, or will glueware need to be written to convert from the OTS to the system protocol? Does the software provide extra functionality? Can you turn off any of the functionality? If you have the source code, can you recompile with defined switches or stubs to remove the extra functionality? How much code space (disk, memory, etc.) does the extra software take up? What happens to the system if an unneeded function is accidentally invoked? Will the OTS software be stand-alone or integrated into your system? The level of understanding required varies with the two approaches. If stand-alone (such as an Operating System), you need to be concerned with the API/ICD primarily, and interactions with your independent software is usually minimal. If the software is to be integrated (e.g. a library), then the interaction between your code and the OTS software is more complicated. More testing and/or analyses may be needed to assure the software system. Does the OTS software have any back doors that can be exploited by others and create a security problem? Is the software version 1.0? If so, there is a higher risk of errors and problems. Consider waiting for at least the first bug-fix update, if not choosing another product. If the OTS products interface is supposed to conform to an industry standard, verify that it does so. HYPERLINK \l "_E.1_Checklist_for"Appendix E provides the above information as a checklist, and also contains another checklist of items to consider when using OTS software in your system. IEEE 1228, the standard for Software Safety Plans, states that previously developed (reused) software and purchased software must be Adequately tested. 
Have an acceptable risk. Remains safe in the context of its planned use. Any software that does not meet these criteria, or for which the level of risk or consequences of failure cannot be determined, should not be used in a safety critical system. In addition, IEEE 1228 provides a standard for a minimal approval process for reused or purchased software: Determine the interfaces to and functionality of the previously developed or purchased software that will be used in safety-critical systems. Identify relevant documents (e.g. product specifications, design documents, usage documents) that are available to the obtaining organization and determine their status. Determine the conformance of the previously developed or purchased software to published specifications. Identify the capabilities and limitations of the previously developed or purchased software with respect to the projects requirements. Following an approved test plan, test the safety-critical features of the previously developed or purchased software independent of the projects software. Following an approved test plan, test the safety-critical features of the previously developed or purchased software with the projects software. Perform a risk assessment to determine if the use of the previously developed or purchased software will result in undertaking an unacceptable level of risk. 7.1.2 Integrating OTS Software into your System Okay, youve weighed all the factors (in-house development costs/time vs. OTS costs/time including glueware and extra tests/analyses), and decided to go ahead with using OTS software. Remember, OTS includes software reused from a different project as well. Now that software must be integrated into the software system, which consists of in-house developed code and/or other OTS/reused code modules. Keeping the OTS code as isolated as possible from the rest of the system is a good idea, and several approaches to doing this are presented below. 
Reference [1] discusses these approaches, and more. Making sure the software system is safe also requires some additional tests and analyses, which are discussed in HYPERLINK \l "_7.1.4_Who_Tests"section 7.1.4. 7.1.2.1 Sticky stuff: Glueware and Wrapper Functions It would be great if the OTS software (including reused libraries and functions from other projects) could just be plunked down into the new system with no additional work. It doesnt work that way, of course. You have to connect the OTS software to the rest of the code. The software that provides the connection is called glueware. Glueware is a general term for any software that sits between the OTS software and the rest of the software system. Usually it is the software required to connect the two pieces (OTS and in-house) and make them work and play well together. Two specific versions of glueware are wrappers, which are described below, and adapters, which are discussed in  HYPERLINK \l "_7.1.2.3_Adding_new" 7.1.2.3. Wrappers are an encapsulation mechanism, where the OTS code is isolated from the rest of the system. Wrappers can prevent certain inputs from reaching the OTS component and/or check and validate outputs from the component. Restricting inputs should be done if certain values could cause the OTS/reused software to behave badly or execute  HYPERLINK \l "_7.1.2.4_Dealing_with" dormant code. Finding those inputs can be difficult, especially when the source code is unavailable and the documentation is barely adequate. Reference [5] discusses using software fault injection with an OTS component, to determine what undesirable outputs the component can produce, and what inputs lead to those outputs. An experiment in using fault injection with wrapper functions to test the interface robustness of the system is described in reference [3]. Wrappers have several problems when they are applied to OTS software. 
First, the OTS components interface must be well understood, which requires more-than-adequate documentation. Outputs that are outside the documented understanding may slip through the wrapper. Second, wrappers may be quite complex, and can approach or exceed the size of the OTS component. Reference [6] discusses generic software wrappers, including the development of a wrapper description language, for wrapping COTS software in a Unix environment. The primary focus of the article is on security issues, but is of interest to anyone considering creating OTS wrappers. Wrappers have a use outside of the operational software. During debug/testing phase, or while evaluating the OTS software, wrappers can be used to instrument the software system. Essentially, the wrapper allows information about what is happening (inputs to the OTS, outputs from the OTS) to be recorded or displayed. This provides insight into what the OTS software is doing in the operational or test situations. 7.1.2.2 Redundant Architecture The influence of the COTS component can be restricted by using a redundant architecture. Replication and multi-voting approaches, including n-version programming, can be used if the software produces consistent faults. That is, for a specific input, the same fault is always produced. However, this approach (especially n-version programming) has debatable reliability, and is not recommended. Partitioning the system into a high-performance portion and a high-assurance kernel (or safety kernel) is another option. The high-performance portion is just that the best, fastest, leanest, etc. code. This part of the software can contain OTS software, as well as custom-developed software. If this part fails, however, the system defaults to the high-assurance kernel. This portion maintains the system in a safe state while providing the required functionality (with reduced performance). 
The Carnegie Mellon Software Engineering Institute (SEI) developed a framework of safety techniques they named Simplex Architecture. These techniques include high-assurance application-kernel technology, address-space protection mechanisms, real-time scheduling algorithms, and methods for dynamic communication among modules. This process requires using analytic redundancy to separate major functions into high-assurance kernels and high-performance subsystems. Off-The-Shelf (OTS) products can be used in the high-performance subsystem and even replaced without bringing down the system. Reference [9] describes the Simplex Architecture. Redundant architecture is no silver bullet for OTS. It suffers from the same problems as wrapper functions: complexity, and the inability to deal effectively with unknown and unexpected functionality. 7.1.2.3 Adding or Adapting Functionality Sometimes the OTS software is almost what you want, but is missing some small piece of required functionality. Or the OTS software contains what is needed, but the interfaces don't match up. In either case, a specialized form of glueware called an adapter can be written. If extra functionality is required, an adapter will intercept the input (command, function call) for that functionality, execute the new function which it contains, and return the result — all without invoking the OTS software! Or, the adapter may provide some pre- or post-processing for an OTS function. For example, the OTS software has a function to control 16-bit output ports. The function takes two parameters — port address and value to write to the port. The primary software needs to access a specialized output port. This one requires writing to two consecutive 8-bit ports instead of one 16-bit port. The adapter software intercepts the function call (by matching the port address to the special one). 
It breaks the 16 bit value passed into 2 8-bit values, then performs two calls to the OTS function to write the values, incrementing the output port address by one between the calls. When the interfaces between the OTS software and the rest of the code don't match up, an adapter can be written to translate between the two. For example, the OTS software produces messages in one format (header, message, checksum), but the standard protocol used by the rest of the system has a different header and uses a CRC instead of a checksum. The adapter would intercept messages coming from the OTS software, modify the header, and calculate the CRC before passing the message on. 7.1.2.4 Dealing with Extra Functionality Because OTS software was originally written for another application, or written as a general set of functions, it will often contain extra functionality not needed by the current project. This extra code is referred to as dormant code, because it should sit there in the software, undisturbed and not executing. The trick is to make sure that's what it really does! The first step is to identify if the OTS/reused software has any dormant code within it. To adequately determine this, access to the source code is necessary. If source code is unavailable, the software provider may be able to provide information, if you supply a list of the functions you will be using. Product user groups, newsgroups, and web pages may also contain useful information, though always consider the source of the information. If nothing else is available, look through the documentation for defined functions that you will not be using. The higher the ratio of used functionality to total functionality in the OTS software, the less dormant code there is. Once the presence of dormant code is determined, look for the stimuli that activate it (cause it to execute). 
That will include command invocation, function calls to specific routines, and possible invocation from required functions based on the software state or parameters passed. If the source code is available, it can be examined for undefined (in the documentation) ways of entering the dormant code. Also look at the resources the dormant code uses. How much memory space does it take up? How much disk/storage space? Will the presence of this extra code push the software system close to any prescribed limits? In an ideal system, you will be able to identify any dormant code and verify that it cannot ever be executed. Since we never have an ideal system, contingency planning (risk mitigation) is required. Look at what happens when the dormant code is executed. What functions does it perform? Does it affect the system performance or capacity? Examine the behavior of the system if any of the dormant code is executed — can it go into an unsafe state? Will system performance be degraded, leading to a possible mission or safety issue? Can the dormant software lead to a hazard, or interfere with a hazard control or mitigation?  You have to protect your system (in particular your safety critical functions) from malfunctioning OTS software. This requires wrapping the OTS software (glueware) or providing some sort of firewall. The more dormant code there is in the OTS software, the more likely it is to trigger accidentally. Depending on the issue and the product, you may be able to work with the vendor to disable the dormant code, or provide some safeguards against its unintentional execution. Procedural methods to avoid accidentally invoking the dormant code can be considered, but only as a last resort. They open up many possibilities for human error to cause problems with the system. 
How much extra testing will need to be done on the software system is determined by the amount of dormant code, the level of insight into the code (what can it do), and any safety problems with the code. OTS software that could interfere with a safety control, with no source code availability, and with little or no details on its development and testing, will require extra testing! Software with good documentation, well encapsulated (not integrated with the system), and with no ability to affect safety (such as no I/O capability) may not need much extra testing. The determination of the level of testing effort should be made by the software engineers and the safety engineers, with input from project management, based on the risk to the system and to safety that the OTS software imparts. 7.1.3 Special Problems with Reused Software The greatest problem with reusing software created for a different project is psychological. You know what the software does, how well it was tested in the previous system, and it just fits so perfectly into the new system. You don't need any extra analysis or testing — it's already been done. WRONG! That's what the Ariane 5 team thought when they reused Ariane 4 software! (See reference [10] for details.) No two systems are alike. You cannot assume that the software you wrote for System A is going to function as expected in System B. The new system may be on a faster processor, and timing problems that weren't apparent in slower System A now become critical. Or the new system may have a critical task that must be executed regularly. The reused code may tie the system up long enough that the critical task is delayed. It is very important to analyze the reused code in light of the capacity, capability, and requirements of the new system. Look for issues with timing, hogging the system, overwriting variables, and using the same system resources for different purposes, as well as other issues. 
Carefully consider the differences between the new and old systems, and how those differences can affect the functioning of the reused software. It is a goal in modern software engineering to create reusable software. After all, why should the wheel have to be constantly reinvented? We recycle many things in our society — why not code? While a laudable goal, software reuse is still in its infancy, and all the problems and pitfalls haven't been found yet. Applying reused software to a new system requires a lot of thought up front.  7.1.4 Who Tests the OTS? (Us vs. Them) Hopefully, the OTS software you are about to use has been thoroughly tested, either by the vendor or by the previous project. If you're lucky, you have copies of the test reports. If you're even more lucky, you have a copy of a hazard analysis for the software. You can stop now, right? Wrong. Even the most thoroughly tested and analyzed OTS software must still be analyzed and tested for how it operates in the new system! Think about OTS software as a child in a playground. It may play well with the children in the sandbox and on the slide. It gets dizzy on the merry-go-round, but still keeps playing. But put it on the swing and the rope breaks, dumping itself on the ground and creating a hazard for other children in the area. No two systems are identical. Old software must be looked at in the new context.  Safety critical Off-The-Shelf (COTS, GOTS, or reused) software should be analyzed (up front) and tested by the user. Software that resides on the same platform as safety critical software (and is not partitioned from that software), or any high risk OTS software are included as safety critical. These analyses and tests should be performed whether the software is modified or not. Remember, this is YOUR system. The OTS software may have been tested, even thoroughly tested, but not in your system environment. 
Ariane 5 [10] demonstrated that well tested software can cause significant problems when applied to a new system (domain). Your first step is to find out what testing has already been done. Ideally, the software will be delivered with documentation that includes the tests performed and the results of those tests. If not, ask the vendor for any test or analysis documentation they have. (It never hurts to ask.) If the software is government supplied or contractor developed for a government program, hazard analyses may be available. Hazard analyses may be available for other software as well, though it is less likely for commercial software unless developed for other safety regimes (FAA, medical, automobile, etc.). Existing hazards analyses and test results of unmodified software previously used in safety critical systems may be used as the basis for certification in a new system. The existing analyses and test reports should be reviewed for their relevance to the reuse in the new environment. This means that you need to look at the differences in the environment between the old system and the system you wish to use this software in. If the OTS software causes or affects hazards, you need to address mitigation. If source code is available, correct the software to eliminate or reduce to an acceptable level of risk any safety hazards discovered during analysis or test. The corrected software must be retested under identical conditions to ensure that these hazards have been eliminated, and that other hazards do not occur. If source code is not available, the hazards must be mitigated in another way — wrapping the OTS software, providing extra functionality in the in-house software, removing software control of a hazard cause/control, or even deciding not to use the OTS software. Thoroughly test the system to verify that the hazards have been properly mitigated. 
OTS software has taken software and system development by storm, spurred on by decreasing funds and shortened schedules. Safety engineering is trying to catch up, but the techniques and tests are still under development. Providing confidence in the safety of OTS software is still something of a black art. 7.1.4.1 Recommended Analyses and Tests The hazard analysis (mentioned above) must be updated to include the OTS software. Any new hazards that the OTS software adds to the system must be documented, as well as any ability to control a hazard. As much as possible, consider the interactions of the OTS software with the safety critical code. Look for ways that the OTS software can influence the safety critical code. For example: overwriting a memory location where a safety critical variable is stored; getting into a failure mode where the OTS software uses all the available resources and prevents the safety critical code from executing; or clogging the message queue so that safety critical messages do not get through in a timely manner. Ideally, OTS software should be thoroughly tested in a stand-alone environment, before being integrated with the rest of the software system. This may have been already done, and the vendor may have provided the documentation. The level of software testing should be determined by the criticality or risk level of the software. High risk safety critical software should be analyzed and tested until it is completely understood. If the OTS software is safety critical, subject it to as many tests as your budget and schedule allow. The more you know about the software, the less likely it is to cause problems down the road. Since source code is often not available, the primary testing will be black box. Test the range of inputs to the OTS software, and verify that the outputs are as expected. Test the error handling abilities of the OTS software by giving it invalid inputs. 
Bad inputs should be rejected or set to documented values, and the software should not crash or otherwise display unacceptable behavior. See if the software throws any exceptions, gets into infinite loops, or reaches modes where it excessively uses system resources. Software fault injection (SFI) is a technique used to determine the robustness of the software, and can be used to understand the behavior of OTS software. It injects faults into the software and looks at the results (Did the fault propagate? Was the end result an undesirable outcome?). Basically, the intent is to determine if the software responds gracefully to the injected faults. Traditional software fault injection used modifications of the source code to create the faults. SFI is now being used on the interfaces between components, and can be used even when the source code is unavailable. [5], [3], and [12] discuss software fault injection with COTS software. The following analyses, if done for the system, should be updated to include the OTS software: Timing, sizing and throughput — especially if the system is close to capacity/capability limits. Software fault tree, to include faults and dormant code in the OTS software Interdependence and Independence Analyses, if sufficient information available Design Constraint Analysis Code Interface Analysis Code Data Analysis Interrupt Analysis Test Coverage Analysis 7.2 Contractor-developed Software With government downsizing and budget cutting, a large portion of the software previously developed in-house at NASA centers is now being contracted out. Usually, whole systems are developed under the contract, including the software that runs the system. The NASA Safety Manual (NPG 8715.3), chapter 2, discusses safety and risk management requirements for NASA contracts. Responsibilities of the Project/Program manager, Contracting Officer, and Safety and Mission Assurance personnel are described. 
With a contract, especially a performance-based contract, it is usually difficult to specify how the contractor develops the software. The end-result is the primary criterion for successful completion. However, with safety critical software and systems, the "how" is very important. The customer needs to have insight into the contractor development processes. This serves two purposes: to identify major problems early, so that they can be corrected; and to give confidence in the final system. 7.2.1 Contract Inclusions Once the contract is awarded, both sides are usually stuck with it. Making sure that the delivered software is what you want starts with writing a good contract. According to NPG 8715.3, the following items should be considered for inclusion in any contract: Safety requirements Mission success requirements Risk management requirements Submission and evaluation of safety and risk management documentation from the contractor, such as corporate safety policies, project safety plans, and risk management plans. Reporting of mishaps, close calls, and lessons learned Surveillance by NASA. Performance-based contracts still have a requirement for surveillance! Sub-contracting — require that the safety requirements are passed on to sub-contractors! Clear, concise and unambiguous requirements prevent misunderstandings between the customer and the contractor. Safety requirements should be clearly stated in the specifications. Remember that changing requirements usually involves giving the contractor more money. Do as much thinking about the system safety up front, and write the results into the system specification. 7.2.1.1 Safety Process NASA has a particular process for safety verification of flight projects (both shuttle and ISS). This involves creating a Safety Data Package and going through three levels of reviews at Johnson Space Center. The reviews are by phase. 
Phase 0/1 is the preliminary review, where the JSC Safety Panel learns about the project, reviews the safety aspects of the preliminary design, and has a chance to input any safety concerns to the project. Phase II (2) usually occurs around the Critical Design Review or during the actual implementation of the design. It is a more in-depth look at the system, hazards, and controls, as well as at the verification process to assure the hazards are mitigated. Phase III (3) occurs near the end of the project, several months before launch. The verification of safety features must be complete, or tracked on a Verification Tracking Log (VTL) if they are still outstanding. All VTL inputs must be completed before the Flight Readiness Review, prior to launch. It is important to specify in the contract who has responsibility for preparing and presenting the Safety Data Package to the JSC Safety Panel. Software safety will need to be addressed as part of the process. 7.2.1.2 Analysis and Test The contract should also clearly state what analyses or special tests need to be done for safety and mission assurance, who will do them, and who will see the results. In a performance-based contract, usually the contractor performs all functions, including analyses. In other cases, some of the analyses may be handled from the NASA side. Regardless, the tests and analyses should be spelled out, as well as the responsible party. Before writing the contract, review sections HYPERLINK \l "_4._SAFETY_CRITICAL_1"  4 (development) and HYPERLINK \l "_5._SOFTWARE_SAFETY_2"5 (analysis) to determine what development processes, analyses, and tests need to be included in the system specification or contract. Use the guidance on  HYPERLINK \l "_3.2.3.3_Tailoring_the" tailoring when imposing processes, analyses and tests. 
7.2.1.3 Software Assurance and Development Process Software Assurance (SA) (also referred to as Software Quality Assurance or Software Product Assurance) is a vital part of successful software development. For a performance-based contract, the SA role is usually handled by a managerially independent group within the contracting company. For other contracts, SA may be performed by NASA SPA personnel, if stated in the requirements. Regardless of who performs the SA function, the requirement for a Software Assurance function should be included in the contract. Rather than call out specific roles and responsibilities for SA, requiring use of an accepted standard (IEEE 12207 or CMM level 3, for example), or specifying that the SA responsibilities will be called out in an SA plan that is approved by the NASA project manager, is sufficient. The contract can also state process requirements that the contractor must meet. For example, software development according to IEEE 12207 may be required. The contractor can be required to have or obtain a certain level of the Capability Maturity Model (CMM). Local ISO requirements (local to the NASA center) may also be imposed. A special method of problem reporting may be required, or the contractor may use their own, established method. It is important that a mechanism exist for NASA to be aware of problems and their corrections, once the software and system reaches a certain level of maturity. A formal Problem Reporting/Corrective Action process usually begins when the software has reached the first baseline version. 7.2.1.4 Contractor Surveillance It is important when imposing requirements on the contractor that a method of monitoring their compliance is also included. Metrics might be selected that will give insight into the software status. The submittal of corroborating data might be required (such as certification to ISO 9000 or CMM level 3). 
Surveillance of the contractor also needs to be included, and is discussed in  HYPERLINK \l "_7.2.2_Monitoring_Contractor" section 7.2.2. 7.2.1.5 Software Deliverables Making sure you get the software you need is important, but figuring out what you need at the beginning of a project can be difficult. Some of the software deliverables are obvious. Others are often forgotten or overlooked. You also need to consider what happens after the system is delivered and the contractor is no longer part of the project. You, the customer, will have to maintain the system. Make sure you have enough information to do that! This list encompasses many commonly required or desired software deliverables. It does not include every conceivable software deliverable, but gives a good starting point. Operational software — the software that is part of/runs the system. This includes flight software, ground support software, and analysis software. Standard project documentation for software, including Software Management Plan, Software Development Plan, Software Assurance Plan, Software Requirements Specification (if not NASA-provided), Verification and Validation Plan, Software Test Plan, and Software Configuration Management Plan. The Risk Management Plan, Safety Plan and Reliability/Maintainability Plan should address software, or point to where the risk management, safety, reliability, and maintainability of the software is discussed. Design documentation. Source code. Any development tools used, especially if they are obscure, expensive, or difficult to obtain later. Any configuration files, setup files, or other information required to configure the development tools for use with the project. Simulators or models developed for use with the operational software. These may be needed to reproduce a test, for updates to the software after delivery, or to understand aspects of the software when errors are found during operation. Test software used to verify portions of the software. 
This includes stubs and drivers from unit and integration tests. Software that generates data for use in testing also falls under this category. Software Assurance reports, including process audit results and discrepancy reports. Formal Inspection reports. Test procedures and reports. User/operator manual. 7.2.1.6 Independent Verification and Validation (IV&V) All NASA projects must complete an evaluation on the need for IV&V or Independent Assessment (IA). NPG 8730 gives the criteria and process for this evaluation. It is important that this evaluation be completed, and agreed to, before the contract is awarded. Extra contractor resources may be needed to aid the IV&V or IA activities. Contractor cooperation with IV&V or IA personnel is expected. At a minimum, management, software development, and software assurance will need to work with the IV&V or IA personnel, providing information, answers, and understanding of the software system and the development process. Depending on the level of IV&V levied on the project, some independent tests may be performed. These may require some contractor resources to implement. 7.2.1.7 Software Change Process The requirement to implement a formal software change process should be included in the contract. The process should include a change control board to consider each request. The board should have representatives from various disciplines, including software development, software assurance, systems, safety, and management. Depending on the level of software/hardware integration, someone with an electronics or mechanical understanding may be included, permanently or as the need for such expertise arises. A NASA/customer representative should be part of the change board, or at least review the board decisions. Some of the changes may impact the ability of the system to meet the requirements, may add or remove functionality, or may impact the safety and reliability of the system. 
7.2.1.8 Requirements Specification Problems found now, before the contract is written, will save money in the long run! Most software problems are actually specification problems, and the fixes become progressively more expensive as the software develops. You should also have a mechanism in place to facilitate communication between the contractor and the customer. The requirements are rarely completely unambiguous, and some interpretation often occurs. Exchanges between contractors (and subcontractors) and the NASA customer will help to assure that misunderstandings are caught early, ambiguous requirements are clarified, and everyone is on the same page. 7.2.2 Monitoring Contractor Processes NASA contract monitoring for Safety and Mission Assurance (S&MA) takes one of two approaches. Oversight is an in-line approach, where NASA personnel work with the contractor as a team member. For the software portion, NASA personnel may act as Software Assurance, perform audits, witness tests, and perform safety analyses. They may advise the project on best practices or make suggestions of new techniques or tools. Insight is a more hands-off approach and is often used with performance-based contracts. The assumption is that the contractor knows what they are doing, and NASA only needs enough insight into their processes to make sure things are functioning properly and that no major problems are brewing. In this mode, all SA functions are performed by the contractor. NASA software surveillance consists of reviewing the SA records, spot auditing the software development process and the SA process, and participating in major reviews. Other activities may be performed if requested by the contractor and NASA project management. Which approach is used, and the specifics of how to apply the approach, are called out in a surveillance plan. This plan is produced by the NASA project management once the contract is awarded. The "who" and "what" details are included in this plan. 
"Who" is the responsible party. For example, the contract may state that NASA will perform the Preliminary Hazard Analysis, but that the contractor will perform all subsequent analyses. "What" would be the list of analyses and special tests that must be performed. "What" is also the list of audits, records and documentation reviews, and other surveillance processes that the NASA S&MA engineer will need to perform, to verify contractor compliance with the process and SA requirements of the contract. 7.2.3 Recommended Software Testing In addition to tests performed by the contractor, you may wish to do additional tests after software delivery. If the environment under which the software safety verification tests were performed has changed (from engineering model to flight system, for example), or if the safety verification tests were not witnessed by NASA personnel, those tests should be rerun (depending on the criticality of the hazards). Hopefully, all desired tests will have been included in the contract, and the software will be delivered with test reports. If not, then the software should be subjected to the additional tests upon delivery. The software acceptance test should be thorough. It should include more than just functional testing. All "must work" and "must not work" functions should be exercised and verified. The error handling and fault tolerance of the software must be verified. You don't want to break the system, but you also want to make sure that the software can safely handle the real world inputs and unanticipated events. 8. REFERENCES References are listed by section, numbered from [1] for each section. 1. INTRODUCTION NASA-STD-8719.13A NASA Software Safety Standard, September 1997 NSTS 13830C Implementation Procedure for NASA Payload System Safety Requirements NASA-GB-A201 NASA Software Assurance Guidebook, 9/89 Jet Propulsion Laboratory, Software Systems Safety Handbook Gowen, Lon D., and Collofello, James S. 
Design Phase Considerations for Safety Critical Software Systems. Professional Safety, April 1995 2. SYSTEM SAFETY PROGRAM NPG 8715.3 NASA Safety Manual, Chapter-3, System Safety, and Appendix-D (Analysis Techniques) NSTS 13830C Implementation Procedure for NASA Payload System Safety Requirements NSTS-22254 Methodology for Conduct of Space Shuttle Program Hazard Analyses Department of Defense, SOFTWARE SYSTEM SAFETY HANDBOOK, A Technical & Managerial Team Approach , Dec. 1999, by Joint Software System Safety Committee 3. SOFTWARE SAFETY PLANNING NASA Software Acquisition Life Cycle SMAP/Version 4.0, 1989 Leveson, Nancy G., Safeware - System Safety and Computers, Addison-Wesley, Appendix-A Medical Devices - The Therac-25 Story. NMI 8010.1 "Classification of NASA Space Transportation (STS) Payloads". MIL-STD-882D Military Standard - Standard Practice for System Safety International Electrotechnical Committee (IEC), draft standard IEC 1508, Software for Computers in the Application of Industrial Safety-Related Systems. 4. SAFETY CRITICAL SOFTWARE DEVELOPMENT The Computer Control of Hazardous Payloads - Final Report NASA/JSC/FDSD 24 July 1991 SSP 50038 Computer-Based Control System Safety Requirements International Space Station Alpha NSTS 19943 Command Requirements and Guidelines for NSTS Customers STANAG 4404 (Draft) NATO Standardization Agreement (STANAG) Safety Design Requirements and Guidelines for Munitions Related Safety Critical Computing Systems WSMCR 127-1 Range Safety Requirements - Western Space and Missile Center, Attachment-3 Software System Design Requirements. This document is being replaced by EWRR (Eastern and Western Range Regulation) 127-1, Section 3.16.4 Safety Critical Computing System Software Design Requirements. AFISC SSH 1-1 System Safety Handbook - Software System Safety, Headquarters Air Force Inspection and Safety Center. 
EIA Bulletin SEB6-A System Safety Engineering in Software Development (Electrical Industries Association) NASA Marshall Space Flight Center (MSFC) Software Safety Standard Underwriters Laboratory - UL 1998 Standard for Safety - Safety-Related Software, January 4th, 1994 Radley, Charles, 1980, M.Sc. Thesis, Digital Control of a Small Missile, The City University, London, United Kingdom. Gowen, Lon D. and Collofello, James S. Design Phase Considerations for Safety-Critical Software Systems. PROFESSIONAL SAFETY, April 1995. Spector, Alfred and David Gifford. The Space Shuttle Primary Computer System. Communications of the ACM 27 (1984): 874-900. Knight, John C, and Nancy G. Leveson, An Experimental Evaluation of the Assumption of Independence in Multiversion Programming. IEEE Transactions on Software Engineering, SE12(1986); 96-109. Brilliant, Susan S., John C. Knight and Nancy G. Leveson , Analysis of Faults in an N-Version Software Experiment IEEE Transactions on Software Engineering, 16(1990), 238-247. Brilliant, Susan S., John C. Knight and Nancy G. Leveson , The Consistent Comparison Problem in N-Version Software. IEEE Transactions on Software Engineering, SE45 (1986); 96-109. Dahll, G., M. Barnes and P. Bishop. Software Diversity: Way to Enhance Safety? Information and Software Technology. 32(1990); 677-685. Shimeall, Timothy J. and Nancy G. Leveson. An Empirical Comparison of Software Fault Tolerance and Fault Elimination. IEEE Transactions on Software Engineering, 17(1991); 173-182. Laprie, Jean-Claude, J. Arlat, Christian Beounes and K. Kanoun. Definitions and Analysis of Hardware and Software, Fault-tolerant Architectures. Computer. July 1990: 39-51. Arlat, Jean, Karama Kanoun and Jean-Claude Laprie. Dependability Modeling and Evaluation of Software Fault-tolerant Systems. IEEE Transactions on Computers. 39(1990): 504-513. Anderson, T. and P. Lee. Fault Tolerance: Principles and Practice, Englewood Cliffs, NJ: Prentice Hall, 1981. Abbott, Russell J. 
Resourceful Systems for Fault Tolerance, Reliability and Safety. ACM Computing Survey. March 1990: 35-68. Neumann, Peter G. On Hierarchical Design of Computer Systems for Critical Applications IEEE Transactions on Software Engineering, 12(1986): 905-920. Leveson, Nancy G., Stephen S. Cha, John C. Knight and Timothy J. Shimeall, Use of Self-Checks and Voting in Software Error Detection: An Empirical Study. IEEE Transactions on Software Engineering, SE16(1990); 432-443. Ould, M. A. Software Development under DefStan 00-55: A Guide. Information and Software Technology 32(1990): 170-175. NASA-GB-A302 Formal Methods Specification and Verification Guidebook for Software and Computer Systems. NSTS 1700.7B Safety Policy and Requirements for Payloads Using the Space Transportation System. Lutz, Robyn R., Ampo, Yoko, Experience Report: Using Formal Methods for Requirements Analysis of Critical Spacecraft Software, SEL-94-006 Software Engineering Laboratory Series - Proceedings of the Nineteenth Annual Software Engineering Workshop, NASA GSFC, December 1994. Butler, Ricky W.; and Finelli, George B.: The Infeasibility of Quantifying the Reliability of Life-Critical Real-Time Software IEEE Transactions on Software Engineering, vol. 19, no. 1, Jan 1993, pp 3-12. Rushby, John: Formal Methods and Digital Systems Validation for Airborne Systems, NASA Contractor Report 4551, December 1993 Miller, Steven P.; and Srivas, Mandayam: Formal Verification of the AAMP5 Microprocessor: A Case Study in the Industrial Use of Formal Methods, presented at WIFT 95: Workshop on Industrial-Strength Formal Specification Techniques, April 5-8, 1995, Boca Raton, Florida, USA, pp. 30-43. Butler, Ricky; Caldwell, James; Carreno, Victor; Holloway, Michael; Miner, Paul; and Di Vito, Ben: NASA Langley's Research and Technology Transfer Program in Formal Methods, in 10th Annual Conference on Computer Assurance (COMPASS 95), Gaithersburg, MD, June 1995. 
NUREG/CR-6263 MTR 94W0000114 High Integrity Software for Nuclear Power Plants, The MITRE Corporation, for the U.S. Nuclear Regulatory Commission. Yourdon Inc., Yourdon Systems Method-Model Driven Systems Development, Yourdon Press, N.J., 1993. DeMarco, Tom, Software State of the Art: Selected Papers, Dorset House, NY, 1990. Butler, Ricky W. and Finelli, George B.: The Infeasibility of Experimental Quantification of Life-Critical Software Reliability. Proceedings of the ACM Sigsoft 91 Conference on Software for Critical Systems, New Orleans, Louisiana, Dec. 1991, pp. 66-76. NASA-STD-2202-93 Software Formal Inspections Standard Model Checking (book) E. M. Clarke, Orna Grumberg, Doron Peled; MIT Press ISBN: 0262032708; Hardcover - 314 pages (December 1999); Price: $50.00 Stolper, Steven A. Designs that Fly! An Approach to Software Architectural Design, Embedded Systems Conference, Fall 1998.  HYPERLINK "http://www.esconline.com/98fallpapers.htm" http://www.esconline.com/98fallpapers.htm, class 443 Hall, Anthony, Seven Myths of Formal Methods, IEEE Software, 7(5):11-19, September 1990. Kemmerer, Richard A., Integrating Formal Methods into the Development Process, IEEE Software, 7(5):37-50, September 1990. 5. SOFTWARE SAFETY ANALYSIS NASA-STD-2100-91, NASA Software Documentation Standard Software Engineering Program, July 29, 1991 DOD-STD-2167A Military Standard Defense Systems Software Development, Feb. 29, 1988 (this document has been replaced by DOD-STD-498, which was cancelled in 1998 when IEEE 12207 was released) SSSHB 3.2/Draft JPL Software Systems Safety Handbook NASA-STD-2202-93 Software Formal Inspections Standard NASA-GB-A302 Software Formal Inspections Guidebook Targeting Safety-Related Errors During Software Requirements Analysis, Robyn R. Lutz, JPL. Sigsoft 93 Symposium on the Foundations of Software Engineering. 
SSP 30309 Safety Analysis and Risk Assessment Requirements Document - Space Station Freedom Program Beizer, Boris, Software Testing Techniques, Van Nostrand Reinhold, 1990. - (Note: Despite its title, the book mostly addresses analysis techniques). Beizer, Boris, Software System Testing and Quality Assurance, Van Nostrand Reinhold, 1987. (Also includes many analysis techniques). Yourdon Inc., Yourdon Systems Method - model driven systems development, Yourdon Press, N.J., 1993. DeMarco, Tom, Software State of the Art: selected papers, Dorset House, NY, 1990. Roberts, N., Vesely, W., Haasl, D., and Goldberg, F., Fault Tree Handbook, NUREG-0492, USNRC, 1/86. Leveson, N., Harvey, P., "Analyzing Software Safety", IEEE Transaction on Software Engineering, Vol. 9, SE-9, No. 5, 9/83. Leveson, N., Cha, S., and Shimeall, T., "Safety Verification of Ada Programs Using Software Fault Trees", IEEE Software, Volume 8, No. 4, 7/91. Feuer, A. and Gehani N. "A comparison of the programming languages C and Pascal"ACM Computing Surveys, 14, pp. 73-92, 1982. Carre, B., Jennings, T., Mac Lennan, F., Farrow, P., and Garnsworthy, J., SPARK The Spade Ada Kernel, 3/88. Ichbiah J. et al., Reference Manual for the Ada Programming Language, ANSI/MIL-STD-1815, 1983. Wichmann B. "Insecurities in the Ada Programming Language", NPL Report 137/89, 1/89. Leveson, N. and Stolzy, J., "Safety analysis using Petri-Nets", IEEE Trans on Software Engineering, p. 386-397, 3/87. Garrett, C., M. Yau, S. Guarro, G. Apostolakais, Assessing the Dependability of Embedded Software Systems Using the Dynamic Flowgraph Methodology. Fourth International Working Conference on Dependable Computing for Critical Applications, San Diego Jan 4-6, 1994 BSR/AIAA R-023A-1995 (DRAFT) Recommended Practice for Human Computer Interfaces for Space System Operations - Sponsor: American Institute of Aeronautics and Astronautics. Sha, Liu; Goodenough, John B. Real-time Scheduling Theory and Ada, Computer, Vol. 
23, April 1990, pp 53-62, Research Sponsored by DOD. Sha, Liu; Goodenough, John B. Real-time Scheduling Theory and Ada, The 1989 Workshop on Operating Systems for Mission Critical Computing, Carnegie-Mellon Univ, Pittsburgh, PA. Daconta, Michael C. C Pointers and Dynamic Memory Management" ISBN 0-471-56152-5. Hatton, Les. Safer C: Developing Software for High-Integrity and Safety Critical Systems. McGraw-Hill, New York, 1995. ISBN 0-07-707640-0. Perara, Roland. C++ Faux Pas - The C++ language has more holes in it than a string vest. EXE: The Software Developers Magazine, Vol 10 - Issue 6/November 1995. Plum, Thomas. C Programming Guidelines, pub Plum Hall - Cardiff, NJ.\ISBN 0-911537-03-1. Plum, Thomas. Reliable Data Structures in C, pub Plum Hall - Cardiff, NJ. ISBN 0-911537-04-X. Willis, C. P., and Paddon, D. J. : Machine Learning in Software Reuse. Proceedings of the Seventh International Conference in Industrial and Engineering Application of Artificial Intelligence and Expert Systems, 1994, pp. 255-262 Second Safety through quality Conference, 23rd -25th October 1995, Kennedy Space Center, Cape Canaveral, Florida McDermid, J., "Assurance in High Integrity Software,", High Integrity Software, ed C.T. Sennett, Plenum Press 1989. De Marco, T., Structured analysis and system specification, Prentice-Hall/Yourdon Inc., NY, NY, 1978. Wing, J., "A Specifier's Introduction to Formal Methods," Computer Vol. 23 No. 9, September 1990, pp. 8-24. Meyer, B., "On Formalism in Specification," IEEE Software, Jan 1985, pp. 6-26. Cullyer, J., Goodenough, S., Wichmann, B., "The choice of computer languages for use in safety-critical systems" Software Engineering Journal, March 1991, pp 51-58. Carre, B., Jennings, T., Maclennan, F., Farrow, P., Garnsworthy, J., SPARK The Spade Ada Kernel, March 1988. Boehm, B., "A spiral model of software development and enhancement", IEEE Software Engineering Project Management, 1987, p. 128-142. Unpublished seminar notes, Professor J. 
Cullyer, JPL, Jan. 92. De Marco, T., Structured Analysis and System Specification, Prentice Hall/Yourdon Inc., NY, NY, 1978. Jaffe , M., N. Leveson, M. Heimdahl, B. Melhart "Software Requirements Analysis for Real-Time Process Control Systems", IEEE Transactions on Software Engineering., Vol. 17, NO. 3 pp. 241-257, March 1991. Rushby, J. "Formal Specification and Verification of a Fault Masking and Transient Recovery Model for Digital Flight Systems," SRI CSL Technical Report, SRI-CSL-91-03, June 1991. Reported in "Risks to the public", P. Neumann, Ed., ACM Software Engineering Notes, Vol. 8, Issue 5, 1983. Parnas, D., van Schouwen, A., and Kwan, S., "Evaluation of Safety-Critical Software", Communications of the ACM, p. 636648, 6/90. Leveson, N., "Software Safety: Why, What, and How", Computing Surveys, p. 125-163, 6/86. IEC/65A/WG9, Draft Standard IEC 1508 "Software for Computers In the Application of Industrial Safety-Related Systems", Draft Vers. 1, Sept. 26, 1991. Horgan, et. al., Perils of Software Reliability Modeling, SERC Technical Report, February 3, 1995,  HYPERLINK "http://www.serc.net/TechReports/abstracts/catagory/Reliability.html" http://www.serc.net/TechReports/abstracts/catagory/Reliability.html Kolcio, et. al., Integrating Autonomous Fault Management with Conventional Flight Software: A case study, IEEE, 1999, ???? 6. SOFTWARE DEVELOPMENT ISSUES Brown, D., Solving the Software Safety Paradox, Embedded Systems Programming, volume 11 number 13, Dec. 1998.  HYPERLINK "http://www.embedded.com/98/9812/9812feat2.htm" http://www.embedded.com/98/9812/9812feat2.htm Melkonian, M., Get by Without an RTOS, Embedded Systems Programming, volume 13, number 10, Sept. 2000.  HYPERLINK "http://www.embedded.com/2000/0009/0009feat4.htm" http://www.embedded.com/2000/0009/0009feat4.htm Bell, R. Code Generation from Object Models, Embedded Systems Programming, volume 11, number 3, March 1998.  
HYPERLINK "http://www.embedded.com/98/9803fe3.htm" http://www.embedded.com/98/9803fe3.htm Stewart, D. 30 Pitfalls for Real-Time Software Developers, Part 1 , Embedded Systems Programming, volume 12 number 10, Oct. 1999.  HYPERLINK "http://www.embedded.com/1999/9910/9910feat1.htm" http://www.embedded.com/1999/9910/9910feat1.htm Stewart, D. More Pitfalls for Real-Time Software Developers, Embedded Systems Programming, volume 12 number 11, Nov. 1999  HYPERLINK "http://www.embedded.com/1999/9911/9911feat2.htm" http://www.embedded.com/1999/9911/9911feat2.htm Barbagallo, T. Choosing The Right Embedded Software Development Tools, Integrated Systems Design,  HYPERLINK "http://www.isdmag.com/design/embeddedtools/embeddedtools.html" http://www.isdmag.com/design/embeddedtools/embeddedtools.html Dart, S. Spectrum of Functionality in Configuration Management Systems Technical Report CMU/SEI90TR11  HYPERLINK "http://www.sei.cmu.edu/publications/documents/90.reports/90.tr.011.html" http://www.sei.cmu.edu/publications/documents/90.reports/90.tr.011.html OptiMagic, Inc. Frequently-Asked Questions (FAQ) About Programmable Logic,  HYPERLINK "http://www.optimagic.com/faq.html" http://www.optimagic.com/faq.html Barr, M. Programmable Logic: Whats It To Ya?, Embedded Systems Programming, volume 9, number 6, June 1999  HYPERLINK "http://www.embedded.com/1999/9906/9906sr.htm" http://www.embedded.com/1999/9906/9906sr.htm Villasensor, J. and Mangione-Smith, W. H., Configurable Computing, Scientific American, June, 1997  HYPERLINK "http://www.sciam.com/0697issue/0697villasenor.html" http://www.sciam.com/0697issue/0697villasenor.html SEMSPLC Guidelines: Safety-related application software for programmable logic controllers". The Institution of Electrical Engineers. ISBN 0 85296 887 6 Canning, et. al., Sharing Ideas: the SEMSPLC Project, IEE Review , Volume: 40 Issue: 2 , 17 March 1994 Selic, B. 
Distributed Software Design: Challenges and Solutions, Embedded Systems Programming, Volume 13, number 12, November, 2000  HYPERLINK "http://www.embedded.com/2000/0011/0011feat5.htm" http://www.embedded.com/2000/0011/0011feat5.htm OBrien, M. Embedded Web Servers, Embedded Systems Programming, Volume 12, Number 11, November, 1999.  HYPERLINK "http://www.embedded.com/1999/9911/9911ia2.htm"  http://www.embedded.com/internet/9911/9911ia2.htm  Wood, B. Software Risk Management for Medical Devices, Medical Device & Diagnostic Industry magazine column, Jan. 1999,  HYPERLINK "http://www.devicelink.com/mddi/archive/99/01/013.html" http://www.devicelink.com/mddi/archive/99/01/013.html Schofield, M. Neither Master nor Slave, A Practical Case Study in the Development and Employment of Cleaning Robots, IEEE ????, 1999 Smith, et. al. Validation and Verification of the Remote Agent for Spacecraft Autonomy, ???? Bernard, et. al. Remote Agent Experiment DS1 Technology Validation Report, ???? Reinholz and Patel, Testing Autonomous Systems for Deep Space Exploration, IEEE 1998, ???? Simmons, et. al. Towards Automatic Verification of Autonomous Systems, IEEE 2000, ???? Birk, Andreas Autonomous Systems as distributed embedded devices,  HYPERLINK "http://arti.vub.ac.be/~cyrano/AUTOSYS/" http://arti.vub.ac.be/~cyrano/AUTOSYS/ 7. SOFTWARE ACQUISITION Lindsay, P. and Smith, G. Safety Assurance of Commercial-Off-The-Shelf Software Technical Report No. 00-17, May 2000, Software Verification Research Centre, School of Information Technology, The University of Queensland Besnard, J., Keene, S., and Voas, J. Assuring COTS Products for Reliability and Safety Critical Systems, 1999 Proceedings, Annual Reliability and Maintainability Symposium (IEEE) Voas, J. and Miller, K. Interface Robustness for COTS-based Systems, IEE Colloquium on Cots and Safety Critical Systems (Digest No. 1997/013), 1996, Page(s): 7/1 712 Fischman, L. and McRitchie, K. 
Off-the-Shelf Software: Practical Evaluation, Crosstalk, Jan. 2000,  HYPERLINK "http://www.stsc.hill.af.mil/crosstalk/2000/jan/fischman.asp" http://www.stsc.hill.af.mil/crosstalk/2000/jan/fischman.asp Voas, J. and Payne, J. COTS Software Failures: Can Anything be Done?, IEEE Workshop on Application-Specific Software Engineering Technology, 1998. ASSET-98. Proceedings, Page(s): 140 144 Fraser, T., Badger, L. and Feldman, M. Hardening COTS Software with Generic Software Wrappers, Proceedings of the 1999 IEEE Symposium on Security and Privacy Guidance for Industry, FDA Reviewers and Compliance on Off-the-Shelf Software Use in Medical Devices, Sept. 9, 1999, US Department of Health and Human Services,  HYPERLINK "http://www.fda.gov/cdrh/ode/1252.pdf" http://www.fda.gov/cdrh/ode/1252.pdf Scott, J., Preckshot, G. and Gallagher, J. Using Commercial-Off-the-Shelf (COTS) Software in High-Consequence Safety Systems, UCRL-JC-122246, Fission Energy and Systems Safety Program (FESSP), Lawrence Livermore National Laboratory,  HYPERLINK "http://www-energy.llnl.gov/FESSP/CSRC/122246.pdf" http://www-energy.llnl.gov/FESSP/CSRC/122246.pdf Sha, L., Goodenough, J. and Pollak, B. Simplex Architecture: Meeting the Challenges of Using COTS in High-Reliability Systems, Crosstalk, April 1998,  HYPERLINK "http://www.stsc.hill.af.mil/crosstalk/1998/apr/simplex.asp" http://www.stsc.hill.af.mil/crosstalk/1998/apr/simplex.asp European Space Agency ARIANE 5: Flight 501 Failure,  HYPERLINK "http://www.esrin.esa.it/tidc/Press/Press96/ariane5rep.html" http://www.esrin.esa.it/tidc/Press/Press96/ariane5rep.html The Use of Commercial Off-The-Shelf Software in Safety Critical Projects by Frances E. Simmons at Johnson Space Center, October 11, 1999. Non-published whitepaper. 
OTHER REFERENCES Standards and Guidebooks NASA Standards and Guidebooks NPG 8715.3 NASA Safety Manual NASA-CM-GDBK NASA Software Configuration Management Guidebook NASA-GB-001-94 NASA Software Measurement Guidebook NASA-GB-001-95 NASA Software Process Improvement Guidebook NASA-GB-001-96 NASA Software Management Guidebook NASA-GB-002-95 Formal Methods Specification And verification Guidebook For Software And Computer Systems, Volume I: Planning And Technology Insertion NASA-GB-001-97 Formal Methods Specification And Analysis Guidebook For The verification Of software And computer Systems, volume II: A Practitioner's companion NASA-GB-A201 Software Assurance Guidebook NASA-GB-A301 Software Quality Assurance Audits guidebook NASA-GB-A302 Software Formal Inspections Guidebook NASA-STD-2100-91 NASA Software Documentation Standard NASA-STD-2201-93 NASA Software Assurance Standard NASA-STD-2202-93 Software Formal Inspection Process Standard NASA-STD-8719.13A NASA Software Safety Standard KHB-1700.7 Space Shuttle Payload Ground Safety Handbook NSTS-08126 Problem Reporting and Corrective Action (PRACA) System Requirements NSTS-1700-7B Safety Policy and Requirements for Payloads Using the International Space Station, Addendum NSTS-1700-7B Safety Policy and Requirements for Payloads Using the Space Transportation System Change No. 6 NSTS-22206 Requirements for Preparation and Approval of Failure Modes and Effects Analysis (FMEA) and Critical Items List (CIL) NSTS-22254 Methodology for Conduct of Space Shuttle Program Hazard Analyses NSTS-5300-4(1D-2) Safety, Reliability, Maintainability and Quality Provisions for the Space Shuttle Program Change No. 
2 NSTS-5300.4 Safety, Reliability, Maintainability and Quality Provisions for Space Shuttle Program NSTS-ISS-18798 Interpretations of NSTS/ISS Payload Safety Requirements SSP-50021 Safety Requirements Document, International Space Station Program SSP-50038 Computer-Based Control System Safety Requirements, International Space Station Program IEEE Standards ISO/IEC 12207 Information Technology - Software Life Cycle Processes EIA 12207.0, .1, .2 Industry Implementation of International Standard ISO/IEC 12207 : 1995 IEEE 610.12 IEEE Standard Glossary of Software Engineering Terminology IEEE 830-1998 IEEE Recommended Practice for Software Requirements Specifications IEEE 982.1 IEEE Standard Dictionary Of Measures To Produce Reliable Software IEEE 1016-1998 IEEE Recommended Practice for Software Design Descriptions IEEE 1228-1994 IEEE Standard for Software Safety Plans Military Standards DoD-STD-498 Software Development and Documentation, cancelled in 1998. Replaced by IEEE 12207. MIL-STD-882D System Safety Program Requirements MIL-STD-882C System Safety Program Requirements, January 19, 1993 MIL-STD-882B System Safety Program Requirements, July 1, 1987 Other Standards DO-178B Software Considerations in Airborne Systems and Equipment Certification (Federal Aviation Administration). 
AIAA G-010 Reusable Software: Assessment Criteria for Aerospace Applications ANSI/AIAA R-013 Recommended Practice: Software Reliability R-013-1992 ISO 9000-3 Quality Management And Quality Assurance Standards - Part 3: Guidelines For The Application Of ISO 9001: 1994 To The Development, Supply, Installation And Maintenance Of Computer Software Second Edition Review Guidelines on Software Languages for Use in Nuclear Power Plant Safety Systems (see  HYPERLINK "http://www.sohar.com/J1030/appb.htm" http://www.sohar.com/J1030/index.htm) To access NASA standards and other standards used by NASA:  HYPERLINK "http://standards.nasa.gov/sitemap.htm" http://standards.nasa.gov/sitemap.htm Books Software Safety and Reliability : Techniques, Approaches, and Standards of Key Industrial Sectors, Debra S. Herrmann, et al., March 2000 Safeware : System Safety and Computers, Nancy Leveson, April 1995 Safety-Critical Computer Systems, Neil Storey, August 1996 Software Assessment: Reliability, Safety, Testability, Michael A. Friedman and Jeffrey M. 
Voas (Contributor), August 16, 1995 Semsplc Guidelines : Safety-Related Application Software for Programmable Logic Controllers, February 1999 Websites NASA Websites NASA Lessons Learned  HYPERLINK "http://llis.nasa.gov/llis/llis/main.html" http://llis.nasa.gov/llis/llis/main.html NASA Technical Standards  HYPERLINK "http://standards.nasa.gov/sitemap.htm" http://standards.nasa.gov/sitemap.htm NASA Online Directives Information System (NODIS) Library  HYPERLINK "http://nodis.hq.nasa.gov/" http://nodis.hq.nasa.gov/ NASA Documents Online (HQ)  HYPERLINK "http://www.hq.nasa.gov/office/hqlibrary/books/nasadoc.htm" http://www.hq.nasa.gov/office/hqlibrary/books/nasadoc.htm ISS Documentation (PALS)  HYPERLINK "http://iss-www.jsc.nasa.gov:1532/palsagnt/plsql/palshome" http://iss-www.jsc.nasa.gov:1532/palsagnt/plsql/palshome NASA Langley Formal Methods group:  HYPERLINK "http://atb-www.larc.nasa.gov/fm/index.html" http://atb-www.larc.nasa.gov/fm/index.html GSFC Software Engineering Laboratory  HYPERLINK "http://sel.gsfc.nasa.gov/" http://sel.gsfc.nasa.gov/ NASA Software Assurance Technology Center  HYPERLINK "http://satc.gsfc.nasa.gov/homepage.html" http://satc.gsfc.nasa.gov/homepage.html NASA IV&V Center  HYPERLINK "http://www.ivv.nasa.gov/" http://www.ivv.nasa.gov/ NASA Software Working Group  HYPERLINK "http://swg.jpl.nasa.gov/index.shtml" http://swg.jpl.nasa.gov/index.shtml Reference Websites Guide to the Software Engineering Body of Knowledge (SWEBOK)  HYPERLINK "http://www.swebok.org/" http://www.swebok.org/ Software metrics links:  HYPERLINK "http://www.totalmetrics.com/resource/links.htm" http://www.totalmetrics.com/resource/links.htm Software Methods and Tools:  HYPERLINK "http://www.methods-tools.com/html/tools.html" http://www.methods-tools.com/html/tools.html Standards (and 37 Cross References)  HYPERLINK "http://www.cmpcmm.com/cc/standards.html" http://www.cmpcmm.com/cc/standards.html Software Safety Software System Safety Working Group  HYPERLINK 
"http://sunnyday.mit.edu/safety-club/" http://sunnyday.mit.edu/safety-club/ Safety critical systems links:  HYPERLINK "http://archive.comlab.ox.ac.uk/safety.html" http://archive.comlab.ox.ac.uk/safety.html A Framework for the Development and Assurance of High Integrity Software  HYPERLINK "http://hissa.ncsl.nist.gov/publications/sp223/" http://hissa.ncsl.nist.gov/publications/sp223/ Safety Critical Resources  HYPERLINK "http://www.cera2.com/WebID/realtime/safety/blank/org/a-z.htm" http://www.cera2.com/WebID/realtime/safety/blank/org/a-z.htm Software QA and Testing Society for Software Quality:  HYPERLINK "http://www.ssq.org/welcome_main.html" http://www.ssq.org/welcome_main.html Software Testing hotlist:  HYPERLINK "http://www.io.com/~wazmo/qa/" http://www.io.com/~wazmo/qa/ Guidance for Industry, General Principles of Software Validation, Draft Guidance Version 1.1 (FDA)  HYPERLINK "http://www.fda.gov/cdrh/comp/swareval.html" http://www.fda.gov/cdrh/comp/swareval.html Software Testing Stuff:  HYPERLINK "http://www.testingstuff.com/testing2.html" http://www.testingstuff.com/testing2.html Software QA/Test Resource Center  HYPERLINK "http://www.softwareqatest.com/" http://www.softwareqatest.com/ SR/Institute's Software Quality HotList  HYPERLINK "http://www.testworks.com/Institute/HotList/index.9.html" http://www.testworks.com/Institute/HotList/index.9.html TestingCraft tester knowledge exchange  HYPERLINK "http://www.testingcraft.com/index.html" http://www.testingcraft.com/index.html Miscellaneous Software Project Survival Guide  HYPERLINK "http://www.construx.com/survivalguide/chapter.htm" http://www.construx.com/survivalguide/chapter.htm Sample Software Documents  HYPERLINK "http://www.construx.com/doc.htm" http://www.construx.com/doc.htm Software Documents, military,  HYPERLINK "http://www.pogner.demon.co.uk/mil_498/6.htm" http://www.pogner.demon.co.uk/mil_498/6.htm Annals of Software Engineering  HYPERLINK "http://manta.cs.vt.edu/ase/" http://manta.cs.vt.edu/ase/ 
Software Engineering Readings  HYPERLINK "http://www.qucis.queensu.ca/Software-Engineering/reading.html" http://www.qucis.queensu.ca/Software-Engineering/reading.html Introduction to Software Engineering  HYPERLINK "http://www.caip.rutgers.edu/~marsic/Teaching/ISE-online.html" http://www.caip.rutgers.edu/~marsic/Teaching/ISE-online.html Software Engineering hotlist  HYPERLINK "http://www.cc.gatech.edu/computing/SW_Eng/hotlist.html" http://www.cc.gatech.edu/computing/SW_Eng/hotlist.html Brad Appleton's Software Engineering Links  HYPERLINK "http://www.enteract.com/~bradapp/links/swe-links.html" http://www.enteract.com/~bradapp/links/swe-links.html Best Manufacturing Practices guidelines  HYPERLINK "http://www.bmpcoe.org/guideline/books/index.html" http://www.bmpcoe.org/guideline/books/index.html Embedded systems programming  HYPERLINK "http://www.embedded.com/" http://www.embedded.com/ Embedded systems articles  HYPERLINK "http://www.ganssle.com/" http://www.ganssle.com/ APPENDIX A Glossary of Terms Various definitions contained in this Glossary are reproduced from IEEE Standard 610.12-1990, IEEE Standard Glossary of Software Engineering Terminology, copyright © 1990 by the Institute of Electrical and Electronic Engineers, Inc. The IEEE takes no responsibility for and will assume no liability for damages resulting from the reader's misinterpretation of said information resulting from the placement and context in this publication. Terminology Definition Access type A value of an access type is either a null value or a value that designates an object created by an allocator. The designated object can be read and updated via the access value. The definition of an access type specifies the type of objects designated by values of the access type. Accident See Mishap. Accreditation Certification A formal declaration by the Accreditation Authority that a system is approved to operate in a particular manner using a prescribed set of safeguards. 
Annotations Annotations are written as Ada comments (i.e. preceded with "--" so the compiler ignores it) beginning with a special character, #, that signals to the Static code analysis tool that special information is to be conveyed to the tool. Anomalous Behavior Behavior which is not in accordance with the documented requirements. Anomaly A state or condition which is not expected. It may or may not be hazardous, but it is the result of a transient hardware or coding error. Architecture The organizational structure of a system or CSCI, identifying its components, their interfaces and a concept of execution between them. Assertions A logical expression specifying a program state that must exist or a set of conditions that program variables must satisfy at a particular point during a program execution. Types include input assertion, loop assertion, and output assertion. (IEEE Standard 610.12-1990) Associate Developer An organization that is neither prime contractor nor subcontractor to the developer, but who has a development role on the same or related project. Assurance To provide confidence and evidence that a product or process satisfies given requirements of the integrity level and applicable national and international law(s) Audit An independent examination of the life cycle processes and their products for compliance, accuracy, completeness and traceability. Audit Trail The creation of a chronological record of system activities (audit trail) that is sufficient to enable the reconstruction, review and examination of the sequence of environments and activities surrounding or leading to an operation, procedure or an event in a transaction from its inception to its final results. Authenticate To verify the identity of a user, device or other entity in a system, often as a prerequisite to allowing access to resources in the system. Authorization The granting of access rights to a user, program or process. 
Automata A machine or controlling mechanism designed to follow automatically a predetermined sequence of operations or correct errors or deviations occurring during operation. Baseline The approved, documented configuration of a software or hardware configuration item, that thereafter serves as the basis for further development and that can be changed only through change control procedures. Battleshort (Safety Arc) The capability to bypass certain safety features in a system to ensure completion of a mission without interruption due to the safety feature. Bypassed safety features include such items as circuit current overload protection, thermal protection, etc. Build (1) A version of software that meets a specific subset of the requirements that the completed software will meet. (2) The period of time during which a version is developed. NOTE: The relationship of the terms "build" and "version" is up to the developer; for example, it may take several versions to reach a build, a build may be released in several parallel versions (such as to different sites), or the terms may be used as synonyms. BuiltIn Test (BIT) A design feature of an item which provides information on the ability of the item to perform its intended functions. BIT is implemented in software or firmware and may use or control built in test equipment. BuiltIn Test Equipment (BITE) Hardware items that support BIT. Catastrophic Hazard A hazard which can result in a disabling or fatal personnel injury and/or loss of flight hardware or a ground facility. Caution and Warning (C&W) Function for detection, annunciation and control of impending or imminent threats to crew safety or mission success. Certification Legal recognition by the certification authority that a product, service, organization or person complies with the applicable requirements. 
Such certification comprises the activity of checking the product, service, organization or person and the formal recognition of compliance with the applicable requirements by issue of a certificate, license, approval or other document as required by national law or procedures. In particular, certification of a product involves: (a) the process of assuring the design of a product to ensure that it complies with a set of standards applicable to that type of product so as to demonstrate an acceptable level of safety; (b) the process of assessing an individual product to ensure that it conforms with the certified type design; (c) the issue of any certificate required by national laws to declare that compliance or conformity has been found with applicable standards in accordance with items (a) or (b) above. Code Safety Analysis (CSA) An analysis of program code and system interfaces for events, faults, and conditions that could cause or contribute to undesirable events affecting safety. Cohesion (1) DeMarco: Cohesion is a measure of strength of association of the elements of a module. (2) IEEE: The manner and degree to which the tasks performed by a single software module are related to one another. Types include coincidental, communication, functional, logical, procedural, sequential and temporal. Command Any message that causes the receiving party to perform an action. Computer Hardware Devices capable of accepting and storing computer data, executing a systematic sequence of operations on computer data, or producing control outputs. Such devices can perform substantial interpretation, computation, communication, control or other logical functions. Computer Program A combination of computer instructions and data definitions that enable computer hardware to perform computational or control functions. 
Computer Software Configuration Item (CSCI) An aggregate of software that is designated for configuration management and is treated as a single entity in the configuration management process. (IEEE Standard 610.12-1990) Concept/Conceptual The period of time in the software development cycle during which the user needs are described and evaluated through documentation (for example, statement of needs, advance planning report, project initiation memo, feasibility studies, system definition, documentation, regulations, procedures, or policies relevant to the project). Configuration The requirements, design and implementation that define a particular version of a system or system component. Configuration Control The process of evaluating, approving or disapproving, and coordinating changes to configuration items after formal establishment of their configuration identification. Configuration Item (1) An item that is designated for configuration management. (2) A collection of hardware or software elements treated as a unit for the purpose of configuration management. (3) An aggregation of hardware, software or both that satisfies an end user function and is designated for separate configuration management by the acquirer. Configuration Management The process of identifying and defining the configuration items in a system, controlling the release and change of these items throughout the system life cycle, recording and reporting the status of configuration items and change requests, and verifying the completeness and correctness of configuration items. Control Path The logical sequence of flow of a control or command message from the source to the implementing effector or function. A control path may cross boundaries of two or more computers. Controlling Application The lower level application software that controls the particular function and its sensors and detectors. 
Controlling Executive The upper level software executive that serves as the overseer of the entire system including the lower level software. COTS Commercial-off-the-shelf. This refers primarily to commercial software purchased for use in a system. COTS can include operating systems, libraries, development tools, as well as complete applications. The level of documentation varies with the product. Analyses and test results are rarely available. These products are market driven, and usually contain known bugs. Coupling DeMarco: Coupling is a measure of the interdependence of modules The manner and degree of interdependence between software modules. Types include commonenvironment coupling, content coupling, control coupling, data coupling, hybrid coupling, and pathological coupling. Coverage A measure of achieving a complete assessment. 100% coverage is every one of the type specified, e.g. in the situation of test coverage, an assessment of 100% decision coverage is achieved when every one of the decisions in the software has been exercised. Coverage Analysis An analysis of the degree(s) of coverage assessed. Credible Failure A condition that has the potential of occurring based on actual failure modes in similar systems. Critical Design Review (CDR) A review conducted to verify that the detailed design of one or more configuration items satisfy specified requirements; to establish the compatibility among configuration items and other items of equipment, facilities, software, and personnel; to assess risk areas for each configuration item; and, as applicable, to assess the results of the producibility analyses, review preliminary hardware product specifications, evaluate preliminary test planning, and evaluate the adequacy of preliminary operation and support documents. 
(IEEE Standard 610.12-1990) For Computer Software Configuration Items (CSCIs), this review will focus on the determination of the acceptability of the detailed design, performance, and test characteristics of the design solution, and on the adequacy of the operation and support documents. Critical Hazard A hazard which could result in a nondisabling personnel injury, damage to flight hardware or ground support equipment, loss of an emergency system, or use of unscheduled safing procedures. Critical Software Command A command that either removes a safety inhibit or creates a hazardous condition or state.  See also Hazardous Command. Database A collection of related data stored in one or more computerized files in a manner that can be accessed by users or computer programs via a database management system. Data Flow Diagram DFD and CFD diagrams are a graphical representation Control Flow Diagram of a system under the Structured Analysis/Structured (DFDCFD) Design methodology. Control Flow Diagrams represent the flow of control signals in the system, while Data Flow Diagrams represent the flow of data. Deactivated Code (1) A software program or routine or set of routines, which were specified, developed and tested for one system configuration and are disabled for a new system configuration. The disabled functions(s) is (are) fully tested in the new configuration to demonstrate that if inadvertently activated the function will result in a safe outcome within the new environment. (2) Executable code (or data) which by design is either (a) not intended to be executed (code) or used (data), or (b) which is only executed (code) or used (data) in certain configurations of the target system. Dead Code (1) Dead Code is code (1) unintentionally included in the baseline, (2) left in the system from an original software configuration, not erased or overwritten and left functional around it, or (3) deactivated code not tested for the current configuration and environment. 
(2) Executable code (or data) which, as a result of design, maintenance, or installation error cannot be executed (code) or used (data) in any operational configuration of the target system and is not traceable to a requirement (e.g., embedded identifier is OK) Deadlock A situation in which computer processing is suspended because two or more devices or processes are each awaiting resources assigned to the other. (IEEE Standard 610.12-1990) Debug The process of locating and eliminating errors that have been shown, directly or by inference, to exist in software. Degree of Demonstration Extent to which evidence is produced to provide confidence that specified requirements are fulfilled (ISO 8402, 4.5). Note the extent depends on criteria such as economics, complexity, innovation, safety and environmental considerations. Developer The organization required to carry out the requirements of this standard and the associated contract. The developer may be a contractor or a Government agency. Development Configuration The requirements, design and implementation that define a particular version of a system or system component. Document/Documentation A collection of data, regardless of the medium on which it is recorded, that generally has permanence and can be read by humans or machines. Dormant Code Similar to dead code, it is software instructions that are included in the executable but not meant to be used. Dormant code is usually the result of COTS or reused software that include extra functionality over what is required. Dynamic allocation Dynamic allocation is the process of requesting to the operating system memory storage for a data structure that is used when required by the application program's logic. Successful allocation of memory (if memory space is available) may be from the task's heap space. Emulator A combination of computer program and hardware that mimic the instruction and execution of another computer or system. 
Environment (1) The aggregate of the external procedures, conditions and objects that affect the development, operation and maintenance of a system. (2) Everything external to a system which can affect or be affected by the system. Error (1) Mistake in engineering, requirement specification, or design. (2) Mistake in design, implementation or operation which could cause a failure. Error Handling An implementation mechanism or design technique by which software faults are detected, isolated and recovered to allow for correct runtime program execution. Exception Exception is an error situation that may arise during program execution. To raise an exception is to abandon normal program execution to signal that the error has taken place. FailSafe (1) Ability to sustain a failure and retain the capability to safely terminate or control the operation. (2) A design feature that ensures that the system remains safe or will cause the system to revert to a state which will not cause a mishap. Failure The inability of a system or component to perform its required functions within specified performance requirements. (IEEE Standard 610.12-1990) Failure Tolerance The ability of a system or subsystem to perform its function(s) or maintain control of a hazard in the presence of failures within its hardware, firmware, or software. Fault Any change in state of an item that is considered to be anomalous and may warrant some type of corrective action. Examples of faults included device errors reported by Built-In Test (BIT)/Built-In Test Equipment (BITE), out-of-limits conditions on sensor values, loss of communication with devices, loss of power to a device, communication error on bus transaction, software exceptions (e.g., divide by zero, file not found), rejected commands, measured performance values outside of commanded or expected values, an incorrect step, process, or data definition in a computer program, etc. Faults are preliminary indications that a failure may have occurred. 
Fault Detection A process that discovers or is designed to discover faults; the process of determining that a fault has occurred. Fault Isolation The process of determining the location or source of a fault. Fault Recovery A process of elimination of a fault without permanent reconfiguration. Fault Tree A schematic representation, resembling an inverted tree, of possible sequential events (failures) that may proceed from discrete credible failures to a single undesired final event (failure). A fault tree is created retrogressively from the final event by deductive logic. Finite State Machine Also known as: Requirements State Machine, State of Finite Automation Transition Diagram. A model of a multistate entity, depicting the different states of the entity, and showing how transitions between the states can occur. A finite state machine consists of: 1. A finite set of states 2. A finite set of unique transitions. Firmware Computer programs and data loaded in a class of memory that cannot be dynamically modified by the computer during processing (e.g. ROM). Flight Hardware Hardware designed and fabricated for ultimate use in a vehicle intended to fly. Formal Methods (1) The use of formal logic, discrete mathematics, and System machinereadable languages to specify and verify software. (2) The use of mathematical techniques in design and analysis of the system. Formal Verification (For Software) The process of evaluating the products of a given phase using formal mathematical proofs to ensure correctness and consistency with respect to the products and standards provided as input to that phase. GOTS Government-off-the-shelf. This refers to government created software, usually from another project. The software was not created by the current developers (see reused software). Usually, source code is included and all available documentation, including test and analysis results. 
Graceful Degradation (1) A planned stepwise reduction of function(s) or performance as a result of failure, while maintaining essential function(s)/performance. (2) The capability of continuing to operate with lesser capabilities in the face of faults or failures or when the number or size of tasks to be done exceeds the capability to complete. Graph theory An abstract notation that can be used to represent a machine that transitions through one or more states. Ground Support Equipment (GSE) Groundbased equipment used to store, transport, handle, test, check out, service, and/or control aircraft, launch vehicles, spacecraft, or payloads. Hardware Configuration Item An aggregation of a hardware device and computer (HWCI) instructions and/or computer data that reside as readonly software on a hardware device. Hazard The presence of a potential risk situation caused by an unsafe act or condition. A condition or changing set of circumstances that presents a potential for adverse or harmful consequences; or the inherent characteristics of any activity, condition or circumstance which can produce adverse or harmful consequences. Hazard Analysis The determination of potential sources of danger and recommended resolutions in a timely manner for those conditions found in either the hardware/software systems, the person/machine relationship, or both, which cause loss of personnel capability, loss of system, loss of life, or injury to the public. Hazard Cause A component level Fault or failure which can increase the risk of, or result in a hazard. Hazard Control Design or operational features used to reduce the likelihood of occurrence of a hazardous effect. 
Hazardous Command A command whose execution (including inadvertent, out-of-sequence, or incorrectly executed) could lead to an identified critical or catastrophic hazard, or a command whose execution can lead to a reduction in the control of a hazard (including reduction in failure tolerance against a hazard or the elimination of an inhibit against a hazard). Hazardous State A state that may lead to an unsafe state. Hazard Report The output of a hazard analysis for a specific hazard which documents the hazard title, description, causes, control, verification, and status. Hazard Risk Index A combined measure of the severity and likelihood of a hazard. See Table in Section 2. Hazard Severity An assessment of the consequences of the worst credible mishap that could be caused by a specific hazard. Higher Order Logic A functional language for specifying requirements, used in Formal Methods. Independent Assessment (IA) A formal process of assessing (auditing) the development, verification, and validation of the software. An IA does not perform those functions (as in IV&V), but does evaluate how well the project did in carrying them out. Independent Inhibit Two inhibits are independent if no SINGLE failure, error, event, or environment can eliminate more than one inhibit. Three inhibits are independent if no TWO failures, errors, events or environments (or any pair of one of each) can eliminate more than two inhibits. Independent Verification and Validation A process whereby the products of the software development (IV & V) life cycle phases are independently reviewed, verified, and validated by an organization that represents the acquirer of the software and is completely independent of the provider. Inhibit A design feature that provides a physical interruption between an energy source and a function (e.g., a relay or transistor between a battery and a pyrotechnic initiator, a latch valve between a propellant tank and a thruster, etc.). 
Interface In software development, a relationship among two or more entities (such as CSCI-CSCI, CSCI-HWCI, CSCI-User or software unit-software unit) in which the entities share, provide or exchange data. An interface is not a CSCI, software unit or other system component; it is a relationship among them.
This is usually an attempt to achieve independence between redundant software items. Research has shown that this method usually does not achieve the desired reliability, and it is no longer recommended. Negative Testing Software Safety Testing to ensure that the software will not go to a hazardous state or generate outputs that will create a hazard in the system in response to out of bound or illegal inputs. Negligible Hazard Probably would not affect personnel safety but is a violation of specific criteria. No-Go Testing Software Safety Testing to ensure that the software performs known processing and will go to a known safe state in response to specific hazardous situations. Object Code Low level language Computer software, usually in binary notation, unique to the processor object in which it is executed. The same as machine code. Objective Evidence Information which can be proved true, based on facts obtained through observation, measurement, test or other means. Operator Error An inadvertent action by flight crew or ground operator that could eliminate, disable, or defeat an inhibit, redundant system, containment feature, or other design features that is provided to control a hazard. Override The forced bypassing of prerequisite checks on the operator-commanded execution of a function. Execution of any command (whether designated as a hazardous command or not) as an override is considered to be a hazardous operation requiring strict procedural controls and operator safing. (ISS) Patch (1) A modification to a computer subprogram that is separately compiled inserted into machine code of a host or parent program. This avoids modifying the source code of the host/parent program. Consequently the parent/host source code no longer corresponds to the combined object code. 
(2) A change to machine code (object code) representation of a computer program and bypassing the compiler Path The logical sequential structure that the program must execute to obtain a specific output. Peer Review An overview of a computer program presented by the author to others working on similar programs in which the author must defend his implementation of the design. Note: A phase does not imply the use of any specific lifecycle model, nor does it imply a period of time in the development of a software product. Predicate Predicate is any expression representing a condition of the system. Preliminary Design Review (PDR) A review conducted to evaluate the progress, technical adequacy, and risk resolution of the selected design approach for one or more configuration items; to determine each design's compatibility with the requirements for the configuration item; to evaluate the degree of definition and assess the technical risk associated with the selected manufacturing methods and processes; to establish the existence and compatibility of the physical and functional interfaces among the configuration items and other items of equipment, facilities, software, and personnel; and as appropriate, to evaluate the preliminary operation and support documents. (IEEE Standard 610.12-1990) For CSCIs, the review will focus on: (1) the evaluation of the progress, consistency, and technical adequacy of the selected architectural design and test approach, (2) compatibility between software requirements and architectural design, and (3) the preliminary version of the operation and support documents. Preliminary Hazard Analysis (PHA) Analysis performed at the system level to identify safety-critical areas, to provide an initial assessment of hazards, and to identify requisite hazard controls and follow-on actions. Program Description Language (PDL) PDL is used to describe a high level design that is an intermediate step before actual code is written. 
Redundancy Provision of additional functional capability (hardware and associated software) to provide at least two means of performing the same task. Regression Testing The testing of software to confirm that functions, that were previously performed correctly, continue to perform correctly after a change has been made. Reliability The probability of a given system performing its mission adequately for a specified period of time under the expected operating conditions. Rendezvous A rendezvous is the interaction that occurs between two parallel tasks when one task has called an entry of the other task, and a corresponding accept statement is being executed by the other task on behalf of the calling task. Requirement(s) (1) Condition or capability needed by a user to solve a problem or achieve an objective. (2) Statements describing essential, necessary or desired attributes. Requirements, Derived (1) Essential, necessary or desired attributes not explicitly documented, but logically implied by the documented requirements. (2) Condition or capability needed, e.g. due to a design or technology constraint, to fulfill the user's requirement(s). Requirements, Safety Those requirements which cover functionality or capability associated with the prevention or mitigation of a hazard. Requirement Specification Specification that sets forth the requirements for a system or system component. Requirements State Machine See Finite State Machine Reusable Software A software product developed for one use but having other uses, or one developed specifically to be usable on multiple project or in multiple roles on one project. Examples include, but are not limited to, commercialofftheshelf software (COTS) products, acquirerfurnished software products, software products in reuse libraries, and preexisting developer software products. Each use may include all or part of the software product and may involve its modification. 
This term can be applied to any software product (for example, requirements, architectures, etc.), not just to software itself. Reused Software This is software previously written by an in-house development team and used on a different project. GOTS software would come under this category if it is supplied to another government project. Because this software was verified and validated for a previous project, it is often assumed to work correctly in the new system. Each piece of reused software should be thoroughly analyzed for its operation in the new system. Remember the problems when the Ariane 4 software was used in Ariane 5! Risk (1) As it applies to safety, exposure to the chance of injury or loss. It is a function of the possible frequency of occurrence of the undesired event, of the potential severity of resulting consequences, and of the uncertainties associated with the frequency and severity. (2) A measure of the severity and likelihood of an accident or mishap. (3) The probability that a specific threat will exploit a particular vulnerability of the system. Safe (Safe State) (1) The state of a system defined by having no identified hazards present and no active system processes which could lead to an identified hazard. (2) A general term denoting an acceptable level of risk, relative freedom from and low probability of: personal injury; fatality; loss or damage to vehicles, equipment or facilities; or loss or excessive degradation of the function of critical equipment. Safety Freedom from hazardous conditions. Safety Analysis A systematic and orderly process for the acquisition and evaluation of specific information pertaining to the safety of a system. Safety Architectural Design Analysis Analysis performed on the high-level design to verify the (SADA) correct incorporation of safety requirements and to analyze the Safety-Critical Computer Software Components (SCCSCs). 
Safety-Critical Those software operations that, if not performed, performed out-of sequence, or performed incorrectly could result in improper control functions (or lack of control functions required for proper system operation) that could directly or indirectly cause or allow a hazardous condition to exist. Safety-Critical Computer Software Those computer software components (processes, modules, Component (SCCSC) functions, values or computer program states) whose errors (inadvertent or unauthorized occurrence, failure to occur when required, occurrence out of sequence, occurrence in combination with other functions, or erroneous value) can result in a potential hazard, or loss of predictability or control of a system. SafetyCritical Computing System A computing system containing at least one SafetyCritical Function. SafetyCritical Computing Those computer functions in which an error can result in a potential hazard to the user, friendly forces, materiel, third parties or the environment. Safety-Critical Software Software that: (1) Exercises direct command and control over the condition or state of hardware components; and, if not performed, performed out-of-sequence, or performed incorrectly could result in improper control functions (or lack of control functions required for proper system operation), which could cause a hazard or allow a hazardous condition to exist. (2) Monitors the state of hardware components; and, if not performed, performed out-of-sequence, or performed incorrectly could provide data that results in erroneous decisions by human operators or companion systems that could cause a hazard or allow a hazardous condition to exist. (3) Exercises direct command and control over the condition or state of hardware components; and, if performed inadvertently, out-of-sequence, or if not performed, could, in conjunction with other human, hardware, or environmental failure, cause a hazard or allow a hazardous condition to exist. 
Safety Detailed Design Analysis (SDDA) Analysis performed on Safety-Critical Computer Software Components to verify the correct incorporation of safety requirements and to identify additional hazardous conditions. Safety Kernel An independent computer program that monitors the state of the system to determine when potentially unsafe system states may occur or when transitions to potentially unsafe system states may occur. The Safety Kernel is designed to prevent the system from entering the unsafe state and return it to a known safe state. Safing The sequence of events necessary to place systems or portions thereof in predetermined safe conditions. Sensor A transducer that delivers a signal for input processing. Separate Control Path A control path which provides functional independence to a command used to control an inhibit to an identified critical or catastrophic hazard. Software (1) Computer programs and computer databases. Note: although some definitions of software include documentation, MILSTD498 limits the definition to programs and computer databases in accordance with Defense Federal Acquisition Regulation Supplement 227.401 (MILSTD498). (2) Organized set of information capable of controlling the operation of a device. Software Assurance (SA) The process of verifying that the software developed meets the quality, safety, reliability, security requirements as well as technical and performance requirements. Assurance looks at both the process used to develop the software and the analyses and tests performed to verify the software. Software Quality Assurance (SQA) and Software Product Assurance (SPA) are sometimes used interchangeably with Software Assurance. Software Controllable Inhibit A system-level hardware inhibit whose state is controllable by software commands. Software Error The difference between a computed, observed or measured value or condition and the true, specified or theoretically correct value or condition. 
Software Fault An incorrect step, process or data definition in a computer system. Software Inhibit A software or firmware feature that prevents a specific event function from occurring or a specific function from being available. The software may be resident in any medium. (A software inhibit is not in itself an "inhibit" in the sense of providing a physical interrupt between an energy source and a function.) Software Partitioning Separation, physically and/or logically, of (safetycritical) functions from other functionality. Software Requirements Review (SRR) A review of the requirements specified for one or more software configuration items to evaluate their responsiveness to and interpretation of system requirements and to determine whether they form a satisfactory basis for proceeding into a preliminary (architectural) design of configuration items. (IEEE Standard 610.12-1990) Same as Software Specification Review for MIL-STD-498. Software Requirements Specification Documentation of the essential requirements (functions, (SRS) performance, design constraints, and attributes) of the software and its external interfaces. (IEEE Standard 610.12-1990) Software Safety Requirements Analysis Analysis performed to examine system and software (SSRA) requirements and the conceptual design in order to identify unsafe modes for resolution, such as out-of-sequence, wrong event, deadlocking, and failure-to-command modes. Software Specification Review (SSR) Same as Software Requirements Review. Software Safety The application of the disciplines of system safety engineering techniques throughout the software life cycle to ensure that the software takes positive measures to enhance system safety and that errors that could reduce system safety have been eliminated or controlled to an acceptable level of risk. 
Software Safety Engineering The application of System Safety Engineering techniques to software development in order to ensure and verify that software design takes positive measures to enhance the safety of the system and eliminate or control errors which could reduce the safety of the system. System Safety Application of engineering and management principles, criteria, and techniques to optimize safety and reduce risks within the constraints of operational effectiveness, time, and cost throughout all phases of the system life cycle. Software Specification Review (SSR) Same as Software Requirements Review State Transition Diagram (See also Finite State Machine). Directed graph used in many Object Oriented methodologies, in which nodes represent system states and arcs represent transitions between states. System A set of components which interact to perform some function or set of functions. System Safety Application of engineering and management principles, criteria, and techniques to optimize safety and reduce risks within the constraints of operational effectiveness, time, and cost throughout all phases of the system life cycle. System Safety Engineering An engineering discipline requiring specialized professional knowledge and skills in applying scientific and engineering principles, criteria, and techniques to identify and eliminate hazards, or reduce the associated risk. System Safety Management A management discipline that defines system safety program requirements and attempts to ensure the planning, implementation and accomplishment of system safety tasks and activities consistent with the overall program requirements. System Specification Document stating requirements for the system. Test Case A set of test inputs, execution conditions and expected results used to determine whether the expected response is produced. Testing The process of executing a series of test cases and evaluating the results. Test Procedure (1) Specified way to perform a test. 
(2) Detailed instructions for the setup and execution of a given set of test cases and instructions for the evaluation of results executing the test cases. Test Readiness Review (TRR) A review conducted to evaluate preliminary test results for one or more configuration items; to verify that the test procedures for each configuration item are complete, comply with test plans and descriptions, and satisfy test requirements; and to verify that a project is prepared to proceed to formal test of the configuration items. (IEEE Standard 610.12-1990) Test, Stress For software, this is testing by subjecting the software to extreme external conditions and anomalous situations in which the software is required to perform correctly. Time to Criticality The time between the occurrence of a failure, event or condition and the subsequent occurrence of a hazard or other undesired outcome. Traceability Traceability for software refers to documented mapping of requirements into the final product, through all development life cycles. Transition A transition is when an input causes a state machine to change state. Trap Software feature that monitors program execution and critical signals to provide additional checks over and above normal program logic. Traps provide protection against undetected software errors, hardware faults, and unexpected hazardous conditions. Trigger Triggers are one or more conditions that when all are true enable a specific action to take place. Type (As used in software design). A type characterizes both a set of values and a set of operations applicable to those values. Typing of variables can be strong or weak. Strong typing is when only defined values of a variable and defined operations are allowed. Weak typing refers to when the restrictions that are applied are very loose (i.e. a declaration of type integer with no range or operation definition). 
Undocumented Code Software code that is used by the flight software but is not documented in the software design. Usually this pertains to Commercial OfftheShelf Software(COTS) because the documentation is not always available. Unsafe State A system state that may result in a mishap. Unused Code Software code that resides in the flight software that is not intended for use during nominal or contingency situation. Examples are test code, nooped coded (code that is bypassed), and code that is retained by not being used from one operational increment to the next. Validation (1) An evaluation technique to support or corroborate safety requirements to ensure necessary functions are complete and traceable. (2) The process of evaluating software at the end of the software development process to ensure compliance with software requirements. (3) Confirmation by examination and provision of objective evidence that the particular requirements for a specific use are fulfilled (for software). The process of evaluating software to ensure compliance with specified. (4) The process of determining whether the system operates correctly and executes the correct functions. Verification (1) The process of determining whether the products of a given phase of the software development cycle fulfill the requirements established during the previous phase(s) (see also validation). (2) Formal proof of program correctness. (3) The act of reviewing, inspecting, testing, checking, auditing, or otherwise establishing and documenting whether items, processes, services, or documents conform to specified requirements. (4) Confirmation by examination and provision of objective evidence that specified requirements have been fulfilled (for software). (5) The process of evaluating the products of a given phase to determine the correctness and consistency of those products with respect to the products and standards provided as input to that phase. 
Waiver A variance that authorizes departure from a particular safety requirement where alternate methods are employed to mitigate risk or where an increased level of risk has been accepted by management. Watchdog Timer An independent, external timer that ensures the computer cannot enter an infinite loop. Watchdog timers are normally reset by the computer program. Expiration of the timer results in generation of an interrupt, program restart, or other function that terminates current program execution. Acronyms Acronym Term BIT Built-In Test BITE Built-In Test Equipment C&W Caution and Warning CASE Computer Aided Software Engineering CDR Critical Design Review CFD Control Flow Diagram COTS Commercial Off-the-Shelf CSA Code Safety Analysis CSC Computer Software Component CSCI Computer Software Configuration Item CSU Computer Software Unit DFD Data Flow Diagram DID Data Item Description DLA Design Logic Analysis DoD Department of Defense FDIR Fault Detection, Isolation, and Recovery FMEA Failure Modes and Effects Analysis FQR Formal Qualifications Review GFE Government Furnished Equipment GSE Ground Support Equipment HOL Higher Order Logic HRI Hazard Risk Index ICD Interface Control Document IOS International Organization for Standardization ISO from the Greek root isos meaning equal or standard (not an acronym). 
ISO standards are published by IOS IV&V Independent Verification and Validation JPL Jet Propulsion Laboratory, Pasadena, California MIL-STD Military Standard NDI Non-Developmental Item NHB NASA Handbook NDS Non-Developmental Software NMI NASA Management Instruction NPD NASA Policy Directive NPG NASA Procedures and Guidelines NSTS National Space Transportation System OO Object Oriented OOA Object Oriented Analysis OOD Object Oriented Development QA Quality Assurance PDL Program Description Language PHA Preliminary Hazard Analysis POST Power On Self Test S&MA Safety and Mission Assurance SA Software Assurance SADA Safety Architectural Design Analysis SCCSC Safety Critical Computer Software Component SDDA Safety Detailed Design Analysis SEE Software Engineering Environment SIS Software Interface Specification SPA Software Product Assurance SQA Software Quality Assurance SRD Software Requirements Document SRR Software Requirements Review SRS Software Requirements Specifications SSR Software Specification Review SSRA Software Safety Requirements Analysis TRR Test Readiness Review APPENDIX B Software Fault Tree Analysis (SFTA) This section is provided to assist systems safety engineers and software developers with an introductory explanation of the Software Fault Tree Analysis technique. Most of the information presented in this entry is extracted from Leveson et al.12,31. It is possible for a system to meet requirements for a correct state and to also be unsafe. It is unlikely that developers will be able to identify, prior to the fielding of the system, all correct but unsafe states which could occur within a complex system. In systems where the cost of failure is high, special techniques or tools such as Fault Tree Analysis (FTA) need to be used to ensure safe operation. FTA can provide insight into identifying unsafe states when developing safety critical systems. Fault trees have advantages over standard verification procedures. 
Fault trees provide the focus needed to give priority to catastrophic events, and they assist in determining environmental conditions under which a correct or incorrect state becomes unsafe. FTA was originally developed in the 1960's for safety analysis of the Minuteman missile system. It has become one of the most widely used hazard analysis techniques. In some cases FTA techniques may be mandated by civil or military authorities. B.1 Software Fault Tree Analysis Description The fault tree handbook from the U.S. Nuclear Regulatory Commission gives the following description of the technique [1]: "Fault tree analysis can be simply described as an analytical technique, whereby an undesired state of the system is specified (usually a state that is critical from a safety standpoint), and the system is then analyzed in the context of its environment and operation to find all credible ways in which an undesired event can occur. The fault tree is a graphic model of the various parallel and sequential combinations of faults (or system states) that will result in the predefined undesired event. A fault tree thus depicts the logical relationships of basic events that lead to the undesired event  which is the top event of the fault tree". A sample fault tree is shown in Figure B 1 : SFTA Graphical Representation Symbols. B.2 Goal of Software Fault Tree Analysis SFTA is a technique to analyze the safety of a software design. The goal of SFTA is to show that the logic in a software design or in an implementation (actual code) will not produce a hazard. The design or code is modified to compensate for those failure conditions deemed to be hazardous threats to the system. In this manner, a system with safer operational characteristics is produced. SFTAs are most practical to use when we know that the system has relatively few states that are hazardous Developers typically use forward inference to design a system. 
That is, their analysis focuses on generating a next state from a previously safe state. The software is developed with key assumptions about the state of the system prior to entering the next state. In complex systems that rely on redundancy, parallelism, or fault tolerance, it may not be feasible to go exhaustively through the assumptions for all cases. The SFTA technique provides an alternative perspective that uses backward inference. The experience from projects that have employed SFTA shows that this change of perspective is crucial to the issue of finding safety errors. The analyst is forced to view the system from a different perspective, one that makes finding errors more apparent. SFTA is very useful for determining the conditions under which fault tolerance and fail safe procedures should be initiated. The analysis can help guide safety engineers in the development of safety critical test cases by identifying those areas most likely to cause a hazard. On larger systems, this type of analysis can be used to identify safety critical software modules, if they have not already been identified. SFTA is language independent and can be applied to any programming language (high level or low level) as long as the semantics are well defined. The SFTA is an axiomatic verification where the postconditions describe the hazard rather than the correctness condition [3]. This analysis shows that, if the weakest precondition is false, the hazard or postcondition can never occur and conversely, if the precondition is true, then the program is inherently unsafe and needs to be changed. Software fault trees should not be reviewed in isolation from the underlying hardware, because to do so would deny a whole class of interface and interaction problems. Simulation of human failure such as operator mistakes can also be analyzed using the SFTA technique. 
The symbols used for the graphical representation of the SFTA, to a large extent, have been borrowed from the hardware fault tree set (see Figure B-1: SFTA Graphical Representation Symbols) [2]. This facilitates the linking of hardware and software fault trees at their interfaces to allow the entire system to be analyzed. The SFTA makes no claim as to the reliability of the software. When reusing older modules, a new safety analysis is necessary because the fundamental safety assumptions used in the original design must be validated in the new environment. The assertion that highly reliable software is safe is not necessarily true. In fact safety and reliability at times run counter to each other. An example of this conflict can be found in the actual experience of air traffic controllers from the U.S. who attempted to port an air traffic control software application from the U.S. to Britain. The U.S. software had proved to be very reliable but certain assumptions had been made about longitude (i.e., no provision for both east and west coordinates) that caused the map of Britain to fold in half at the Greenwich meridian [3]). SFTA is not a substitute for the integration and test procedures that verify functional system requirements. The traditional methods that certify that requirements are correct and complete will still need to be used. The SFTA helps provide the extra assurance that is required of systems that are either safetycritical or very costly by verifying that safety axioms have been implemented through a rigorous analysis of those software modules that are responsible for the safety controls of the system. Two examples of the application of the SFTA technique illustrate that it is cost effective and helps improve the robustness of a design. SFTA techniques have been applied with success on the Canadian Nuclear Power Plant shutdown software. The software consisted of approximately 6000 lines of Pascal and Fortran code [3]. 
Although no errors were detected in SFTA's, the changes implemented improved the robustness of the system. The increased robustness was achieved by inserting run time assertions to verify safe operating conditions. Another example of an application of SFTA was on the spacecraft called FIREWHEEL (NASA/ESA). This spacecraft had an Intel 8080 assembly language program of approximately 1200 lines of code that controlled flight and telemetry. The code had already been extensively tested when the SFTA techniques were applied. This analysis discovered that an unanticipated environment hazard could have resulted in the loss of the craft [2]. B.3 Use of Software Fault Tree Analysis Any SFTA must be preceded by a hazard analysis of the entire system. The information in the hazard analysis identifies those undesired events in the system that can cause serious consequences. It should be noted that in complex systems not all hazards can be predetermined. In this respect the technique does not claim to produce consistent results irrespective of the analyst. It is dependent on the judgment of the individual as to when to stop the process and which hazards to analyze. The SFTA can be used at different stages of the software life cycle. The earliest stage where the technique should be used is Preliminary Design (if, at this point, the design still has excessive TBDs, then the technique is ineffective). In practice it will be used most frequently at the code level, preferably prior to integration and test. The basic procedure in an SFTA is to assume that the hazard has occurred and then to determine its set of possible causes. The technique is useless if one starts with the overly generalized hazard "system fails". A more specific failure, such as those identified from the earlier hazard analysis, has to be the starting point for the analysis. The hazard is the root of the fault tree and its leaves are the necessary preconditions for the hazard to occur. 
These preconditions are listed in the fault tree and connected to the root of the tree via a logical AND or logical OR of the preconditions (see Figure B 2: Example of High Level Fault Tree). In turn, each one of the preconditions is expanded in the same fashion as the root fault (we identify the causes of each precondition). The expansion continues until all leaves describe events of computable probability or the event cannot be analyzed further. The analysis also stops when the precondition is a hardware malfunction that has no dependency on software. The fault tree is expanded from the specified system level failure to the software interface level where we have identified the software outputs or lack of them that can adversely affect system operation. At this stage the analysis begins to take into account the behavior specific to the language. The language constructs can be transformed into templates using preconditions, postconditions and logical connectives. (For templates of Ada constructs, see Leveson et al. [3].) All the critical code must be traced until all conditions are identified as true or false or an input statement is reached. The technique will be illustrated with an example using a Pascal like language [2]. The code will be analyzed for the occurrence of the variable Z being output with a value greater than 100. We should assume B, X, Z are integers. While B>Xdo begin B :=B 1; Z := Z + 10; end if Z ~ 100 then output Z; In this piece of code there are assignment statements, an "if" and a "while" construct. The templates for these statements will be applied, starting from the occurrence of the event we are searching for "output Z with Z > 100". Refer to Figure B 3 : Example Code Fault Tree for the discussion that follows. The templates for the constructs will be drawn showing all the considerations that are required for the analysis to be complete. 
Some leaves of the tree are not expanded further because they are not relevant to the event or postcondition that we are analyzing. The "if" template shows that the event is triggered by the "then" clause. This follows from the condition in the "if" statement. At this point we need to determine the preconditions necessary for Z > 100 prior to the entry into the while construct. In this example we have only two simple assignments within the "while" construct but they could be replaced by more complex expressions. The analysis would still be similar to that shown here in the example. The "while" construct would be analyzed as a unit and the expressions within the "while" would generate a more complex tree structure as previously described using the language templates to determine the preconditions. By analysis of the transformations in the "while" loop, we arrive at the conclusion that for the Z > 100 to be output, the weakest precondition at the beginning of the code was that for B > X, Z + 1 OB  10X > 100. At this point we have identified the weakest condition necessary for this code to output Z with Z > 100. More detailed examples are provided in references [1] and [2]. Anyone interested in applying the technique should study the examples in the two references or other articles where the technique is illustrated. The analysis that was shown in the section above determined the preconditions for the event to occur. One way to preclude a hazard from happening is to place an assertion in the code that verifies that the precondition for the hazard, as determined in the analysis, does not occur. SFTAs point out where to place assertions and the precondition to assert. If the preconditions do occur, some corrective action needs to take place to remedy the problem or, if a remedy is not possible, to mitigate the consequences. Typically a small percentage of the total software effort on projects will be spent on safety critical code. 
The Canadian Nuclear Power Plant safety-critical shutdown software was reviewed via the SFTA technique in three work months.
Each component in the system is examined, and all the ways it can fail are listed. Each possible failure is traced through the system to see what effects it will have, and whether it can result in a hazardous state. The likelihood of the failure is considered, as well as the severity of the system failure. FMEA has been used by system safety and other engineering disciplines since the 1960s. The methodology has been extended to examine the software aspects of a system (SFMEA). C.1 Terminology A failure is the inability of a system or component to perform its required functions within specified performance requirements. An event that makes the equipment deviate from the specified limits of usefulness or performance is also a failure. Failures can be complete, gradual, or intermittent. A complete system failure is manifested as a system crash or lockup. At this juncture, the system is usually unusable in part, or in whole, and may need to be restarted as a minimum. - What precautions are needed to guard against this, if it is inevitable, then what can be done to insure the system is safe and can recover safely. A gradual system failure may be manifested by decreasing system functionality. Functions may start to disappear and others follow or, the system may start to degrade (as in the speed with which functions are executed may decrease). Often resource management is a fault here, the CPU may be running out of memory or time slice availability. Intermittent failures are some of the most frustrating and difficult to solve. Some of these may be cyclical or event driven or some condition periodically occurs which is unexpected and/or non-predictive. Usually an unrealized path through the software takes place under unknown conditions. These types of failures should be kept in mind when considering failure modes (described below). Unlike most hardware failures, software faults dont usually manifest as hard (complete lockup of the system) type system failures. 
Software doesnt wear out and break. It is either functional, or already broken (but no one knows it)! A Failure Mode is defined as the type of defect contributing to a failure (ASQC); the physical or functional manifestation of a failure (IEEE Std 610.12-1990). The Failure Mode is generally the manner in which a failure occurs and the degree of the failures impact on normal required system operation. Examples of failure modes are: fracture (hardware), value of data out of limits (software), and garbled data (software). The Failure Effect is the consequence(s) a failure mode has on the operation, function, or status of an item or system. Failure effects are classified as local effects (at the component), next higher level effects (portion of the system that the component resides in), and end effect (system level). C.2 Why do an SFMEA? SFMEAs identify key software fault modes for data and software actions. It analyzes the effects of abnormalities on other components in the system, and on the system as a whole. This technique is used to uncover system failures from the perspective of the lowest level components. It is a bottom-up (or forward) analysis, propagating problems from the lowest levels, up to a failure within the broader system. Software Fault Tree Analysis (SFTA, HYPERLINK \l "_APPENDIX_B"Appendix B) is a top down (or backward) approach. It identifies possible system failures and asks what could have caused them. SFTA looks backwards from the failure to the component(s) whose defects could cause or contribute to the failure. The SFMEA asks What is the effect if this component operates incorrectly? Failures for the component are postulated, and then traced through the system to see what the final result will be. Not all component failures will lead to system problems. In a good defensive design, many errors will already be managed by the error-handling part of the design. 
A Software FMEA takes a systems approach, analyzing the softwares response to hardware failures and the effects on the hardware of anomalous software actions. Doing an FMEA is done on software can identify: Hidden failure modes, system interactions, and dependencies Unanticipated failure modes Unstated assumptions Inconsistencies between the requirements and the design SFMEAs are not a panacea. They will not solve all of your problems! You will probably not get all of the above results, but you should be a lot closer to a clean system than if you had not done the analysis. Its important to interact with other members of the team as you perform an SFMEA. No one person understands all components, software or hardware. Have hardware and software designers/engineers review your analysis as you are performing it. Their point of view will help uncover the hidden assumptions or clarify the thought process that led to a requirement or design element. SFMEA is not a silver bullet, but a tool to hedge your bets (reduce your risk). C.3 Issues with SFMEA If SFMEAs are so wonderful, why isnt everyone doing them? The problems are the technique is: Time consuming Tedious Manual method (for now) Dependent on the knowledge of the analyst Dependent on the accuracy of the documentation Questionable benefit of incomplete failure modes list The place to reap the greatest advantages of this technique is in requirements and design analysis. This may take some time, but it is well worth the effort in terms of the different perspectives with which youll be able to view the project (hardware, software, operations, etc.). The technique is considered tedious by some. However, the end result is greater and more detailed project and/or system knowledge. This is most true when used earlier (requirements and design) in the life-cycle. It is easier to use SFMEA later in the project, since components and their logical relationships are known, but at this point (i.e. 
detailed design and implementation) it is often too late (and expensive) to affect the requirements or design. Early in the project, lower level components are conjecture and may be wrong, but this conjecture can be used to drive out issues early. There must be balance in the approach. There is no value in trying to perform analysis on products that are not ready for examination. The technique is dependent on how much the analyst knows and understands about the system. However, as mentioned earlier, the technique should be helpful in bringing out more information as it is being used. Include more reviewers who have diverse knowledge of the systems involved. In addition to looking at the project from different angles, the diversity of background will result in a more keen awareness of the impact of changes to all organizations. Documentation is also very important to using this analysis technique. So, when reviewing documents, use many and different types of resources (systems and software engineers, hardware engineers, system operations personnel, etc.), so that differing perspectives have been utilized in the review process. The obvious benefit is a better product as a result of critique from numerous angles. Again, dont work in a vacuum! Communication is paramount to success. Where should you use the SFMEA technique? All of the following areas, though you should focus on the safety-critical aspects. Single Failure Analysis Multiple Failure Analysis Hardware/Software Interfaces Requirements Design Detailed Design C.4 The SFMEA Process Figure C-1  EMBED MSPhotoEd.3  FMEA analysis begins at the bottom (the end items). Figure C-1 shows a subsystem, indicating how each piece interacts with the others. Logic (ands and ors) is not included on this introductory diagram. The end items are the pressure sensor and temperature sensor. The diagram shows how the failures propagate up through the system, leading to a hazardous event. 
Software FMEAs follow the same procedure used for hardware FMEAs, substituting software components for the hardware. Alternately, software could be included in the system FMEA, if the systems/reliability engineer is familiar with software or if a software engineer is included in the FMEA team. MIL-STD-1629 is a widely used FMEA procedure, and this appendix is based on it. To perform a Software Failure Modes and Effects Analysis (SFMEA), you identify: Project/system components Ground rules, guidelines, and assumptions Potential functional and interface failure modes Each failure mode in terms of potential consequences Failure/fault detection methods and compensating provisions Corrective design or actions to eliminate or mitigate failure/fault Impacts of corrective changes C.4.1 Identify Project/system Components Engineers must know the project, system, and purpose and keep the big picture in mind as they perform the analysis. A narrow perspective can prevent you from seeing interactions between components, particularly between software and hardware. Communicate with those of differing backgrounds and expertise. In performing a FMEA, defining whatever is being worked on is the first order of business. The whatever can be a project, system, subsystem, unit, or some other piece of the puzzle. Depending on where the project is in the development life-cycle (requirements, design, implementation), you will hopefully have some documents to work with. If the documentation is lacking, you will have to do some detective work. Often there is a collection of semi-formal paperwork on the requirements or design produced by the software team but not written into a formal requirements or design document. Look for a Software Development Folder, talk with the developers, and accumulate whatever information you can. If little is on paper, you will have to interview the developers (and project management, hardware engineers, systems people, etc.) to create your own documentation. 
Once you know what the system is and what it is supposed to do, it's time to start breaking down the system into bite-size chunks.
Some sample ground rules are: All failure modes are to be identified at the appropriate level of detail: component, subsystem, and system. Each experiment mission shall be evaluated to determine the appropriate level of analysis required. The propagation of failure modes across interfaces will be considered to the extent possible based on available documentation. Failures or faults resulting from defective software (code) shall be analyzed to the function & object level during detailed design. Failure modes induced by human error shall not be included in this FMEA. The criticality categorization of a hardware item failure mode shall be made on the basis of the worst case potential failure effect. Identical Items which perform the same function, in the same environment (where the only difference is location) will be documented on a worksheet only once provided that the failure mode effects are identical. Containment structures such as combustion chambers and gas cylinders will be analyzed. For catastrophic hazards, dual component failures ( items which are one-fault tolerant) are credible. For catastrophic hazards, triple component failures (items with two-fault tolerance) are not credible. For critical hazards, single component failures are credible. For critical hazards, dual component failures are not credible Release of the contents in a single containment gas bottle does not constitute a hazard of any kind provided that the gases released are pre-combustion gases.(e.g., flammability, toxicity, 02 depletion) Items exempt from failure modes and effects analysis are: tubing, mounting brackets, secondary structures, electrical wiring, and electronic enclosures. Besides the ground rules, you need to identify and document the assumptions youve made. You may not have sufficient information in some areas, such as the speed at which data is expected at an interface port of the system. 
If the assumption is incorrect, when it is examined it will be found to be false and the correct information will be supplied (sometimes loudly). This examination will occur when you describe what you believe to be the normal operation of the system or how the system handles faults to the other project members. Don't let assumptions go unwritten. Each one is important. In other words, ASSUME NOTHING unless you write it down. Once written, it serves as a focus to be further explored and exploded. Try to think outside the box, beyond the obvious. Look at the project as a whole, and then at the pieces/parts. Look at the interactions between components, look for assumptions, limitations, and inconsistencies. Figure C-2  EMBED MSPhotoEd.3  Figure C-2 shows the process of recognizing your assumptions, documenting them, finding out what the reality is, and clarifying them for future reference. C.4.3 Identify Failures Once you understand the system, have broken it into components, created ground rules, and documented your assumptions, it's time to get to the fun part: identifying the possible failures. Failures can be functional (it doesn't do what it was supposed to do), undesirable responses to bad data or failed hardware, or interface related. Functional failures will be derived from the Preliminary Hazard Analysis (PHA) and subsequent Hazard Analyses, including subsystem HAs. There will probably be hardware items on this list. This analysis looks at software's relationship to hardware. It is important to identify functions that need protecting. These functions are "must work" functions and "must not work" functions. A failure may be the compromise of one of these functions by a lower-level software unit. There are also interfaces to be dealt with. There are more problems identified with interfaces, according to some researchers, than any other aspect of software development. 
Interfaces are software-to-software (function calls, interprocess communication, etc.), software-to-hardware (e.g. setting a Digital-to-Analog port to a specified voltage), hardware-to-software (e.g. software reads a temperature sensor), or hardware-to-hardware. SFMEAs deal with all of these except the hardware-to-hardware interface. These are included in the system FMEA. Interfaces also (loosely) include transitions between states or modes of operation. As you look at the system, you will find that you need to make more assumptions. Write them down. When all else fails, and there is no place to get useful information, sometimes a guess is in order. Again, write it down and go discuss it with others. The others should include people outside of your area of expertise. If you are a software person, go talk with safety and systems. If you are a safety specialist, talk with systems, software, and reliability experts. C.4.3.1 Examination of Normal Operations as Part of the System The normal operations of the system include it performing as designed, being able to handle known problem areas, and its fault tolerance and failure response (if designed into the system). Hopefully, the system was designed to correctly and safely handle all anticipated problems. The SFMEA will find those areas where unanticipated problems create failures. This step identifies how the software responds to the failures. This step validates the sufficiency, or lack thereof, of the product to do what it's supposed to do. This has the side effect of confirming the product developers understanding of the problem. In order to understand the operation of a system it may be necessary to work and communicate with systems engineering if you are a software engineer. Systems engineering must also communicate with software engineering, and both must talk with safety and Software Assurance (SA). The normal operation of the software as part of the system or function is described in this part of the SFMEA. 
C.4.3.2 Identify Possible Areas for Faults Areas to examine for possible faults include: Data Sampling Rate. Data may be changing more quickly than the sampling rate allows for, or the sampling rate may be too high for the actual rate of change, clogging the system with unneeded data. Data Collisions. Examples of data collisions are: transmission by many processors at the same time across a LAN, modification of a record when it shouldn't be because of similarities, and modification of data in a table by multiple users in an unorganized manner. Command Failure to Occur. The command was not issued or not received. Command out of sequence. There may be an order to the way equipment is commanded on (to an operational state). For instance, it is wise to open dampers to the duct work going to the floors, as well as the dampers to bring in outside air before turning on the air handling units of a high rise office building. Illegal Command. Transmission problems or other causes may lead to the reception of an unrecognized command. Also, a command may be received that is illegal for the current program state. Timing. Dampers take a long time to open (especially the big ones) so timing is critical. A time delay would be necessary to keep from imploding (sucking in) the outside air dampers or possibly exploding the supply air dampers, by turning on the air handler prematurely. Safe Modes. It is sometimes necessary to put a system which may or may not have software in a mode where everything is safe (i.e. nothing melts down or blows up). Or the software maintains itself and other systems in a hazard free mode. Multiple Events or Data. What happens when you get the data for the same element twice, within a short period of time? Do you use the first or second value? The Improbable. The engineers or software developers will tell you that something can't happen. Try to distinguish between truly impossible or highly improbable failures, and those that are unlikely but possible. 
The improbable will happen if you don't plan for it. These are all sorts of things that software can do to cause system or subsystem failures. Not every software fault will lead to a system failure or shutdown, and even those failures that occur may not be safety critical. There are lots more types of faults than these, but these are a good start when looking for things that can go wrong. C.4.3.3 Possible Failure Modes Identify the possible failure modes and effects in an Events Table and Data Table, included in HYPERLINK \l "_C.4.8_Example_forms"Section C.4.8. Examples of failure modes are: Hardware Failures/Design Flaws Broken sensors lead S/W down wrong path No sensors or not enough sensors - don't know what H/W is doing Stuck valves or other actuators Software Memory overwritten (insufficient buffer or processing times). Missing input parameters, incorrect command, incorrect outputs, out of range values, etc. Unexpected path taken under previously unthought-of conditions. Operator Accidental input of unknown command, or proper command at wrong time. Failure to issue a command at required time. Failure to respond to error condition within a specified time period. Environment Gamma Radiation EMI Cat hair in hard drive Power fluctuations C.4.3.4 Start at the Bottom Go back to the block diagrams you created earlier. Starting at the lowest level, look at a component and determine the effect of that component failing, in one of its failure modes, on the components in the level above it. You may need to consider the effect of this component and all the affected components at the next higher level as well. This must be worked all of the way up the chain. This is a long process. However, if the safety critical portions are fairly isolated in a system, then the analyst will be looking at only those parts of the system that can lead to a critical failure. This is true for the detailed design and implementation phases/versions of this analysis. 
For the requirements and preliminary design phases, the system is more abstract (and therefore smaller and more manageable). C.4.4 Identify Consequences of each Failure The next thing to look at is the effect (consequences) of the defined faults/failures. It is also important to consider the criticality or severity of the failure/fault. So far in the FMEA process, we've concentrated on the safety perspective. However, it's time to look at reliability as well. Like safety, reliability looks at: Severity may be catastrophic, critical, marginal, or negligible. Likelihood of occurrence may be probable, occasional, remote or improbable. Risk levels are defined as 1 through 5, with 1 being prohibitive (i.e. not allowed-must make requirements or design change). The criticality categories include the added information of whether the component or function has redundancy or would be a single point of failure. For each project and center there may be some variation in the ranking of severity level and risk level. This is, after all, not an exact science so much as a professional best guess (best engineering judgment). The relationship between reliability's criticality categories and the safety risk level is shown in the following table: Criticality CategoryRelative Safety Risk Level1 A single failure point that could result in a hazardous condition, such as the loss of life or vehicle.Levels 1 to 21R Redundant components/items for which, if all fail, could result in a hazardous condition.Levels 1 to 22 A single failure point that could result in a degree of mission failure (the loss of experiment data)Levels 2 to 32R Redundant items, all of which if failed could result in a degree of mission failure (the loss of experiment data).Levels 2 to 33 All others.Levels 4 and 5C.4.5 Detection and Compensation At this step, you need to identify the methods used by the system to detect a hazardous condition, and provisions in the system that can compensate for the condition. 
For each failure mode, a fault/failure detection method should be identified. A failure detection mechanism is a method by which a failure can be discovered by an operator under normal system operation or by some diagnostic. Failure detection in hardware is via sensing devices or instruments. In software this could be done by error detection software on transmitted signals, data or messages, memory checks, initial conditions, etc. For each failure mode, a compensating provision should be identified, or the risk accepted if it is not a hazardous failure. Compensating provisions are either design provisions or operator actions which circumvent or mitigate. This step is required to record the true behavior of the item in the presence of an internal malfunction or failure. A design provision could be a redundant item or a reduced function that allows continued safe operation. An operator action could be the notification at an operator console to shut down the system in an orderly manner. An example: The failure is the loss of data because of a power loss (hardware fault), or because other data overwrote it (a software fault). Detection: A critical source and CPU may be backed up by a UPS (uninterruptible power supply) or maybe not. Detect that power was lost and the system is now on this backup source. Mark data at time x as not reliable. This would be one detection scheme. Compensation for the occurrence of this failure: Is there another source for that data? Can it be re-read? Or is it just marked as suspect or thrown out, waiting for the next normal data to overwrite it? What of having a UPS, battery backup, redundant power supply? Of course these are all hardware answers. Can software detect if the data is possibly suspect and tag it or toss it, wait for new input, request for new input, get data from alternate sources, calculate from previous data (trend) etc.? What if input data comes in faster than expected and was overwriting previous data before it was processed? 
How would this system know? What could be done about it? For example, a software system normally receives data input cyclically from 40 sources, then due to partial failures or maintenance mode, now only 20 sources are in cycles and the token is passed 2 times faster. Can buffers handle the increased data rate? C.4.6 Design Changes After a critical hazard has been identified, the project needs to Identify corrective actions Identify changes to the design Verify the changes Track all changes to closure After a critical hazard has been identified it is usually eliminated or mitigated. The result of either of these two actions is a corrective action. This corrective action may be via documented new requirements, design, process, procedure, etc. Once implemented, it must be analyzed and verified to correct the failure or hazard. It is important to look at the new design, once the change is made, to verify that no new hazards have been created. C.4.7 Impacts of Corrective Changes A corrective action will have impact. Impacts can be to the schedule, design, functionality, performances, process, etc. If the corrective action results in a change to the design of the software, then some segment of that software will be impacted. Even if the corrective action is to modify the way an operator uses the system there is impact. You need to go back and analyze the impact of the changes to the system or operating procedures to be sure that they (singularly or jointly) don't have an adverse effect and do not create a new failure mode for a safety critical function or component. Often fixes introduce more errors and there must be a set process to ensure this does not occur in safety critical systems. Ensure that verification procedures cover the affected areas. C.4.8 Example forms This worksheet is used to gather relevant information on the system. It is also a great place to put the data developed during the analysis. 
The ID number can be a drawing number, work breakdown structure number, CSCI identification, or other identification value. FMEA Worksheet ITEM DescriptionID #SUBSYSTEM COMPONENTLOCAL FAILURE MODE/EFFECTSYSTEM EFFECTCRIT Once elements of the system are identified, list them in this worksheet and identify their functions. COMPONENTS ITEM DESCRIPTIONITEM IDFUNCTIONFAILURE MODELOCAL EFFECTSYSTEM EFFECTDETECTABILITYCRIT For a Software FMEA, the Data Table is used to list the effects of bad data on the performance of the system or process being analyzed. A Data Item can be an input, output, or information stored, acted on, passed, received, or manipulated by the software. The Data Fault Type is the manner in which a flaw is manifested (bad data), including data that is out of range, missing, out of sequence, overwritten, or wrong. SFMEA DATA TABLE ModeData ItemData Fault TypeDescriptionEffect (local and system)Crit The Events Table is used to list the effects of an event being performed. The Event Item is the occurrence of some action, either within the software or performed on hardware or other software. An event can be an expected and correct, expected but incorrect, unexpected and incorrect, or unexpected but correct action. Event Fault Types can occur locally (within a module) or on the system as a whole. Types can include halt (abnormal termination), omission (failure of the event to occur), incorrect logic/event, or timing/order (wrong time or out of sequence). SFMEA EVENTS TABLE ModeEvent ItemEvent Fault TypeDescriptionEffect (local and system)Crit APPENDIX D Requirements State Machines D.1 Characteristics of State Machines A formal description of state machines can be obtained from texts on Automata Theory. This description will only touch on those properties that are necessary for a basic understanding of the notation and limitations. State machines use graph theory notation for their representation. A state machine consists of states and transitions. 
The state represents the condition of the machine and the transitions represent changes between states. The transitions are directed (direction is indicated by an arrow), that is, they represent a directional flow from one state to another. The transition from one state to another is induced by a trigger or input that is labeled on the transition. Generally an output is produced by the state machine [38]. The state machine models should be built to abstract different levels of hierarchy. The models are partitioned in a manner that is based on considerations of size and logical cohesiveness. An uppermost level model should contain at most 15 to 20 states; this limit is based on the practical consideration of comprehensibility. In turn, each of the states from the original diagram can be exploded in a fashion similar to the bubbles in a data flow diagram/control flow diagram (DFD/CFD) (from a structured analysis/structured design methodology) to the level of detail required [39]. An RSM model of one of the lower levels contains a significant amount of detail about the system. The states in each diagram are numbered and classified as one of the following attributes: Passive, Startup, Safe, Unsafe, Shutdown, Stranded and Hazard (see Figure 5-4 Example of State Transition Diagram). For the state machine to represent a viable system, the diagram must obey certain properties that will be explained later in this work. The passive state represents an inert system, that is, nothing is being produced. However, in the passive state, input sensors are considered to be operational. Every diagram of a system contains at least one passive state. A passive state may transition to an unsafe state. The startup state represents the initialization of the system. Before any output is produced, the system must have transitioned into the startup state where all internal variables are set to known values. 
A startup state must be proven to be safe before continuing work on the remaining states. If the initialization fails, a time-out may be specified and a state transition to an unsafe or passive state may be defined. Figure D-1 Example of State Transition Diagram  The shutdown state represents the final state of the system. This state is the only path to the passive state once the state machine has begun operation. Every system must have at least one shutdown state. A time-out may be specified if the system fails to close down. If a timeout occurs, a transition to an unsafe or stranded state would be the outcome. Transition to the shutdown state does not guarantee the safety of the system. Requirements that stipulate safety properties for the shutdown state are necessary to ensure that hazards do not occur while the system is being shutdown. A safe state represents the normal operation of the system. A safe state may loop on itself for many cycles. Transitions to other safe states are a common occurrence. When the system is to be shutdown, it is expected to transition from a safe state to the shutdown state without passing through an unsafe state. A system may have zero or more safe states by definition. A safe state also has the property that the risk of an accident associated with that state is acceptable (i.e., very low). Unsafe states are the precursors to accidents. As such, they represent either a malfunction of the system, as when a component has failed, or the system displays unexpected and undesired behavior. An unsafe state has an unacceptable, quantified level of risk associated with it from a system viewpoint. The system is still in a controllable state but the risk of transition to the hazard state has increased. Recovery may be achieved through an appropriate control action that leads either to an unsafe state of lesser risk or, ideally, to a safe state. 
A vital consideration when analyzing a path back to a safe state is the time required for the transitions to occur before an accident occurs. A system may have zero or more unsafe states. The hazard state signals that control of the system has been lost. In this situation the loss of the system is highly probable and there is no path to recovery. The hazard state should take action where possible to contain the extent of damage. The stranded state represents the situation, where during the course of a shutdown operation, the system has lost track of state information and cannot determine a course of action. This state has a high potential to transition to an unsafe state after a specified time depending upon what system is modeled or possibly upon environmental conditions. The only recovery from this state is a power-on restart. D.2 Properties of Safe State Machines There are certain properties that the state machine representation should exhibit in order to provide some degree of assurance that the design obeys certain safety rules. The criteria for the safety assertions are based on logical considerations and take into account input/output variables, states, trigger predicates, output predicates, trigger to output relationship and transitions. D.3 Input/Output Variables All information from the sensors should be used somewhere in the RSM. If not, either an input from a sensor is not required or, more importantly, an omission has been made from the software requirements specification. For outputs it can be stated that, if there is a legal value for an output that is never produced, then a requirement for software behavior has been omitted [40]. D.4 State Attributes The state attributes of the RSM are to be labeled according to the scheme in Figure 5-5 Example RSM and Signals. 
Figure D-2 Example RSM and Signals  D.5 Trigger Predicates A necessary, but not a sufficient condition for a system to be called robust, is that there must always be a way for the RSM to leave every state of the system. This leads us to define two statements about RSMs: Every state in the RSM has a defined behavior (transition) for every possible input. One or more input predicates, out of all possible input predicates, must be able to trigger a transition out of any state. In case there is no input within a specified time, every state must have a defined transition, such as a time-out, that triggers an action. The state machine may also express what actions are taken if the input data is out of range. Low level functions, such as exception handling, may be features that are required for an implementation. A relatively simple method that provides an elementary correctness check is for range verification of input data. The computational cost in most cases will probably not be significant. While range checking does not provide a guarantee of correctness, it is the first line of defense against processing bad data. Obviously, if the input data is out of range, we have identified either a bad sensor or a bad data communication medium.* The RSM technique has limitations when analyzing fault tolerant systems that contain two or more independent lanes. In redundant systems the use of threshold logic may generate another class of safety problems. The area where problems may arise is threshold logic used to validate inputs coming from different sensors. Typically the value read from the different sensors will differ by a certain percentage. 
Sensors are calibrated to minimize this difference, but a check must be made to verify that neither of the following situations occur: 1) a threshold may trigger one lane of the system and not the other if a value below the threshold is contrasted with a value above the threshold from the other lane; and 2) the input as processed by the control law will generate quantitatively and qualitatively different control actions. This effect can be avoided if a vote is taken at the source of the data before transmitting potentially confusing data. In the case of fully redundant, dual lane systems, each system may determine that the other is in error when in reality there is no hardware or software error. A high level RSM will not show this explicitly but it is an issue that needs to be considered in the design before any prototyping, or worse yet, coding takes place [41]. Timing problems are common causes of failures of real-time systems. Timing problems usually happen because either timing is poorly specified or race conditions that were not thought possible occur and cause an unwanted event to interrupt a desired sequence. All real-time data should have upper and lower bounds in time. Race conditions occur when the logic of a system has not taken into account the generation of an event ahead of the intended time. This type of error occurs when events that should be synchronized or sequenced are allowed to proceed in parallel. This discussion will not address the obvious case of an error in the sequence logic. *A third possibility may also exist: the data may truly be valid, but the understanding of the system or environment state is incomplete and data having values outside of the expected range is regarded as invalid (e.g. data on ozone loss in the atmosphere above Antarctica was regarded as invalid until ground based observations confirmed the situation). 
The ability to handle inputs will be called capacity and the ability to handle diverse types of input will be called load. A real-time system must have specifications of minimum and maximum capacity and load. Robustness requires the ability of the system to detect a malfunction when the capacity limits have been violated. Capacity limits are often tied to interrupts where hardware and software analyses are necessary to determine if the system can handle the workload (e.g., CPU execution time, memory availability, etc.). Load involves multiple input types and is a more comprehensive measure than capacity. Criteria for the system or process load limits must be specified. For a system to be robust, a minimum load specification needs to be specified, as well as a maximum (assuming that a real-time process control system has inputs of a certain period). The capacity and load constraints as developed for the RSM will help serve as a guide for designing the architecture of the system and subsequently in the final system implementation. These performance requirements have safety implications. The ability of the system to handle periodic capacity and load requirements is a fundamental safety property. If a system cannot handle the work load then the safety of the system is at risk because process control is not performed in a timely fashion. D.6 Output Predicates The details of when an output is valid may not be known at the time the RSM is generated but these constraints should be documented somewhere in the RSM to serve as a guideline for the implementer. In a similar fashion to inputs, outputs must have their value, and upper and lower timing bounds specified. Output capacity is limited by the ability of the actuator to respond. Compatibility must exist between the frequency of reaction to input and the capacity of the output mechanism to respond. 
This requires that a timing analysis be performed to be certain that potential worst case input and output rate speeds can be adequately handled by both software and hardware. For output data to be valid the input data must be from a valid time range. Control decisions must be based on data from the current state of the system, not on stale data. In the computation of the output, the delay in producing the output must not exceed the permissible latency. An example of an incorrect output timing problem occurred on the F-18 fighter plane. A wing mounted missile failed to separate from the launcher after ignition because a computer program signaled the missile retaining mechanism to close before the rocket had built up enough thrust to clear the missile from the wing. The aircraft went violently out of control, but the missile fuel was eventually expended and the pilot was able to bring the plane under control before a crash occurred [42]. D.7 Degraded Mode Operation When a system cannot meet its work load requirements in the allotted time or unanticipated error processing has consumed processor resources and insufficient time is available for normal processing, the system must degrade in a graceful manner. Responses to graceful degradation include: Masking of nonessential interruptsReduction of accuracy and/or response timeLogging and generation of warning messagesSignals to external world to slow down inputsReduction of processing load (execute only core functionality)Trace of machine state to facilitate post event analysisError handling Which of the above measures get implemented depends on the application and its specific requirements. Where there is load shedding, a degraded mode RSM will exist that exhibits properties that in all likelihood are different from the original RSM. The same analysis that is performed for the RSM of the fully operational system should be done on the degraded mode RSM. 
Special care must be taken in the implementation of performance degradation that reduces functionality and/or accuracy. A situation can arise where, because of the transition to a state machine with different properties (and therefore, the control laws of the original RSM will be affected by a reduction in accuracy or frequency of the inputs), the outputs may not transition smoothly. In systems where operator intervention is an integral part of operation, this jolt may confuse the operator and contribute to further degradation because of operator inability to predict behavior. In principle, where response time limits can be met, predictability is preferable to abrupt change. In order to recover from degraded mode operation there needs to be a specification of the conditions required to return to normal operations. These conditions must be specific enough to avoid having the system continuously oscillate back and forth between normal and degraded mode. In practice, a minimum delay and a check of the cause of the anomaly can achieve this. D.8 Feedback Loop Analysis Process control models provide feedback to the controller to notify changes in state caused by manipulated variables or internal disturbances. In this manner the system can adjust its behavior to the environment. An RSM can be used to verify if feedback information is used and what signals are employed. If feedback is absent then either the design is incorrect or the requirements are faulty. The design of the system needs to incorporate a mechanism to detect the situation where a change in the input should trigger a response from the system and the response is either too slow, too fast or unexpected. For example, when a command is given to turn on a heater, a resulting temperature rise curve would be expected to follow a theoretical model within certain tolerances. 
If the process does not respond within a certain period of time then it can be assumed that something is wrong and the software must take an appropriate action. At a minimum, this action should be the logging of the abnormality for future analysis. The simplest, most inexpensive check for a servo loop is to verify if the reference position is different from the actual position. If the difference is non-negligible, some form of control action must be taken. If the actual position does not vary in the presence of a command to act, then it can be concluded that there is a fault in the system. RSMs can be used to help design the control process and to verify that all feedback loops are closed and that they generate the appropriate control action. D.9 Transition Characteristics Requirements may also involve specifications regarding transitions between states. A system may or may not possess certain properties, while some other properties are mandatory. All safe states must be reachable from the initial state. Violation of this principle leads to a contradiction of requirements or a superfluous state. No safe state should ever transition, as a result of a computer control action, to an unsafe state. In principle, an automated (i.e., computer controlled) system should never transition to a hazardous state unless a failure has occurred. In general, if operator action is considered (such as the issuing of a command), the previously stated requirement may be impossible to accomplish given the requirements of certain systems. In this latter situation, the transition into and out of the unsafe state should be done in a manner that takes the least amount of time and the system eventually reverts back to a safe state. Once the system is in an unsafe state, either because of error conditions or unexpected input, the system may transition to another unsafe state that represents a lower risk than the previous state. 
If it is not possible to redesign the system so that all transitions from a hazardous state eventually end in a safe state, then the approach must be to design the transitions to the lowest possible risk, given the environment. Not all RSM diagrams will be able to achieve an intrinsically safe machine, that is, one that does not have a hazardous state. The modeling process's main virtue lies in the fact that, through analysis of the RSM, faults may be uncovered early in the life cycle. The objective and challenge is to design a system that poses a tolerable level of risk. The design of a robust system requires that, for all unsafe states, all soft and hard failure modes be eliminated. A soft failure mode occurs when an input is required in at least one state through a chain of states to produce an output and that the loss of the ability to receive that input could potentially inhibit the software. A hard failure mode is analogous to a soft failure except that the input is required for all states in the chain and the loss of the input will inhibit the output. If a system allows for reversible commands, then it must check that, for every transition into a state caused by the command, it can transition back to the previous state via a reverse command. While in that state, an input sequence must be able to trigger the deactivation of the command. In a similar fashion, if an alarm indicates a warning and the trigger conditions are no longer true, then the alert should also cease (if appropriate operator acknowledgment action was performed when required). State transitions do not always have to be between different states. Self loops are permissible, but eventually every real-time system must initiate a different function and exit from the self loop. Watchdog timers may be used to catch timeouts for self loops. The RSM technique helps a designer by graphically representing these constraints and assisting in specifying implementation level detail. 
D.10 Conclusions The RSM techniques described above can be used to provide analysis procedures to help find errors and omissions. Incorporating the RSM analysis into the development cycle is an important step towards a design that meets or exceeds safety requirements. Practically all the current safety oriented methodologies rely on the quality of the analyst(s) for results and the techniques mentioned above are a first attempt at formalizing a system's safety properties. The RSM technique does not claim to guarantee the design of a 100% safe system. Inevitably some faults (primarily faults of omission) will not be caught, but the value of this methodology is in the fact that many faults can be made evident at an early stage, if the right mix of experienced people are involved in the analysis. Complexity of current software and hardware has caused a nonlinear increase in design faults due to human error. For this reason and because testing does not prove the absence of faults, it is recommended that the RSM modeling techniques be employed as early as possible in the system life cycle. The RSM methodology, if applied with system safety considerations, is a valuable step towards a partial proof to show the effects and consequences of faults on the system. If the RSM model is robust and the design can be shown to have followed the criteria in the previous sections, then a significant milestone will have been completed that demonstrates that the system is ready to proceed to the next phase in the life-cycle and developers will have a high level model that satisfies a core set of requirements. From an overall systems perspective, the RSM model is used to provide a high level view of the actual system, and further refinements of the states can give insight into implementation detail. This model is then checked against the rules formulated in the previous sections. 
Deviation from the rules involves additional risk and, as such, this additional risk should be evaluated and documented. This process of documentation is necessary for a post project analysis to confirm the success of the system or to analyze the cause of any failures. The technique of using RSMs to explore properties of safety critical systems is a highly recommended practice that development teams should follow. Verification of the safety properties of the RSM should be performed as a team effort between software developers, systems safety and software quality assurance. If the RSM analysis or any equivalent technique has not been performed for the design of a complex system, then that project is running the risk that major design constraints will be put aside until late in the development cycle and will cause a significant cost impact. APPENDIX E E.1 Checklists for Off-the-Shelf (OTS) Items Item to considerAnswer or CommentDoes the OTS software fill the need in this system? Is its operational context compatible with the system under development? Consider not only the similarities between the system(s) the OTS was designed for and the current system, but also the differences. Look carefully at how those differences affect operation of the OTS software. How stable is the OTS product? Are bug-fixes or upgrades released so often that the product is in a constant state of flux? How responsive is the vendor to bug-fixes? Does the vendor inform you when a bug-fix patch or new version is available?How compatible are upgrades to the software? Has the API changed significantly between upgrades in the past? Will your interface to the OTS software still work, even after an upgrade?How cutting edge is the software technology? OTS software is often market driven, and may be released with bugs (known and unknown) in order to meet an imposed deadline or to beat the competition to market.Conversely, is the software so well known that it is assumed to be error free and correct? 
Think about operating systems and language libraries. In a safety critical system, you do not want to assume there are no errors in the software. What is the user base of the software? If it is a general use library, with thousands of users, you can expect that most bugs and errors will be found and reported to the vendor. Make sure the vendor keeps this information, and provides it to the users! Small software programs will have less of a shake down and may have more errors remaining.What level of documentation is provided with the software? Is there more information than just a users manual? Can more information be obtained from the vendor (free or for a reasonable price)?Is source code included, or available for purchase at a reasonable price? Will support still be provided if the source code is purchased or if the software is slightly modified?Can you communicate with those who developed the software, if serious questions arise? Is the technical support available, adequate, and reachable? Will the vendor talk with you if you modify the product?Will the vendor support older versions of the software, if you choose not to upgrade? Many vendors will only support the newest version, or perhaps one or two previous versions.Is there a well-defined API (Application Program Interface), ICD (interface control document), or similar documentation that details how the user interacts with the software? Are there undocumented API functions?What are the error codes returned by the software? How can it fail (return error code, throw an exception, etc.)? Do the functions check input variables for proper range, or it is the responsibility of the user to implement?Can you obtain information on the internals of the software, such as the complexity of the various software modules or the interfaces between the modules? 
This information may be needed, depending on what analyses need to be performed on the OTS software.Can you get information about the software development process used to create the software? Was it developed using an accepted standard (IEEE 12207, for example)? What was the size of the developer team?What types of testing was the software subjected to? How thorough was the testing? Can you get copies of any test reports?Are there any known defects in the software? Are there any unresolved problems with the software, especially if the problems were in systems similar to yours? Look at product support groups, newsgroups, and web sites for problems unreported by the vendor. However, also keep in mind the source of the information found on the web some is excellent and documented, other information is spurious and incorrect.Were there any analyses performed on the software, in particular any of the analyses described in  HYPERLINK \l "_5._SOFTWARE_SAFETY_1" section 5? Formal inspections or reviews of the code? How compatible is the software with your system? Will you have to write extensive glueware to interface it with your code? Are there any issues with integrating the software, such as linker incompatibility, protocol inconsistencies, or timing issues?Does the software provide all the functionality required? How easy is it to add any new functionality to the system, when the OTS software is integrated? Will the OTS software provide enough functionality to make it cost-effective?Does the OTS-to-system interface require any modification? For example, does the OTS produce output in the protocol used by the system, or will glueware need to be written to convert from the OTS to the system protocol?Does the software provide extra functionality? Can you turn off any of the functionality? If you have the source code, can you recompile with defined switches or stubs to remove the extra functionality? How much code space (disk, memory, etc.) does the extra software take up? 
What happens to the system if an unneeded function is accidentally invoked? Lessons learned from earlier projects using OTS software are useful. The following checklist can be used to reduce the risk of using OTS software: No.Items To Be ConsideredDoes It Apply? (yes/no)Planned Action1*Has the vendors facilities and processes been audited? Allow an audit of their facility and processes. If for any reason an audit cannot be conducted then the OTS software is considered an unmitigated significant hazard, and as such, the OTS software may be inappropriate for the intended device.2*Are the verification and validation activities for the OTS appropriate? Demonstrate that the verification and validation activities performed for the OTS software are appropriate and sufficient to fulfill the safety and effectiveness requirements for the device.3*Can the project maintain the OTS independent of vendor support? Ensure that the project can maintain the OTS software even if the original developer ceases support.4Does software contain interfaces, firewalls, wrappers, etc.? Consider interfaces, firewalls, wrappers and glue early in the process. When creating wrappers avoid dependency on internal product interfaces and functionality or isolate the dependencies.4Does software provide diagnostics? Look for built-in diagnostics and error handling.5Any key products influencing choices? Identify key products (or strategies or standards) that can influence other choices before product evaluation.6Has the software vendor been used before? Employ any past experience with vendor/product. Ask for information from other projects. Use databases of information, keeping in mind that the behavior of a product can change depending on how it is used.7Is this the initial version? Do not buy a version 1.0.8Have competitors been researched? Ask competitors of the products about the other products.9Is the source code available? Consider buying the source code so you can perform your own testing. 
Note that this is expensive and will usually require waiving technical support and/or the warranty.10Are industry standard interfaces available? Ensure the product uses industry standard interfaces.11Has product research been thorough? Base product selection on analysis of the facts.12Is the validation for the OTS driver software package available? Include the validation process for the OTS driver software package as part of the system interface validation process. This includes the verification of the data values in both directions for the data signals; various mode settings for control signals in both directions (if applicable); and the input/output interrupt and timing functions of the driver with the CPU and operating system.13Are there features that will not be used? Determine how to handle unused features.14Have tools for automatic code generation been independently validated? Determine whether tools for automatic code generation have been independently validated. OTS tool selection should follow the same process as component selection.15Can previous configurations be recovered? Reevaluate each version and ensure that the previous configuration can be restored.16Will a processor require a recompile? Perform a complete and comprehensive retest of the system replacing a processor that requires a recompile.17Has a safety impact assessment been performed? Perform a safety impact assessment when new or modified OTS components are placed in a baselined system. Document hazards in a Failure Modes and Effects Analysis (FMEA) table. Ensure there is traceability between the hazard reports, the design requirements, and the test reports. Analysis should include the review of known problem reports, user manuals, specifications, patches, literature and internet searches for other users experience with this OTS Software.18Will the OTS tools affect safety? Keep in mind the tools purpose when selecting OTS tools. 
Determine whether the results are easy to verify and whether the results of the tools use will influence decisions that affect safety.19Is the OTS being used for the proper application? Use OTS products for the purpose for which they were created.20Is there compatibility between the OTS hardware and software? Realize that not all OTS hardware can run all OTS software.21Does the vendor have ISO certification? Determine whether the vendor is ISO certified or has been awarded a SEI rating of 3 or higher. This provides confidence that their development process is adequate.22Does the vendor receive quality products from their suppliers? Ensure that vendors are aware that they are responsible for the product quality from their contractors and subcontractors.* A PROJECT WITH LIFE THREATENING HAZARDS MUST DO THESE ITEMS E.2 Generic Software Safety Requirements From MSFC REQUIREMENT TO BE METAPPLICABILITY Yes/No/PartialACTION Accept/WorkThe failure of safety critical software functions shall be detected, isolated, and recovered from such that catastrophic and critical hazardous events are prevented from occurring.Software shall perform automatic Failure Detection, Isolation, and Recovery (FDIR) for identified safety critical functions with a time to criticality under 24 hours.Automatic recovery actions taken shall be reported to the crew, ground, or controlling executive. 
There shall be no necessary response from crew or ground operators to proceed with the recovery action.The FDIR switch over software shall be resident on an available, non-failed control platform which is different from the one with the function being monitored.Override commands shall require multiple operator actions.Software shall process the necessary commands within the time to criticality of a hazardous event.Hazardous commands shall only be issued by the controlling application, or by the crew, ground, or controlling executive.Software that executes hazardous commands shall notify the initiating crew, ground operator, or controlling executive upon execution or provide the reason for failure to execute a hazardous command.Prerequisite conditions (e.g., correct mode, correct configuration, component availability, proper sequence, and parameters in range) for the safe execution of an identified hazardous command shall be met before execution.In the event that prerequisite conditions have not been met, the software shall reject the command and alert the crew, ground operators, or the controlling executive.Software shall make available status of all software controllable inhibits to the crew, ground operators, or the controlling executive.Software shall accept and process crew, ground operator, or controlling executive commands to activate/deactivate software controllable inhibits.Software shall provide an independent and unique command to control each software controllable inhibit.Software shall incorporate the capability to identify and status each software inhibit associated with hazardous commands.Software shall make available current status on software inhibits associated with hazardous commands to the crew, ground operators, or controlling executive.All software inhibits associated with a hazardous command shall have a unique identifier.Each software inhibit command associated with a hazardous command shall be consistently identified using the rules and 
legal values.If an automated sequence is already running when a software inhibit associated with a hazardous command is activated, the sequence shall complete before the software inhibit is executed.Software shall have the ability to resume control of an inhibited operation after deactivation of a software inhibit associated with a hazardous command.The state of software inhibits shall remain unchanged after the execution of an override.Software shall provide error handling to support safety critical functions.Software shall provide caution and warning status to the crew, ground operators, or the controlling executive.Software shall provide for crew/ground forced execution of any automatic safing, isolation, or switch over functions.Software shall provide for crew/ground forced termination of any automatic safing, isolation, or switch over functions.Software shall provide processing of crew/ground commands to return to the previous mode or configuration of any automatic safing, isolation, or switch over function.Software shall provide for crew/ground forced override of any automatic safing, isolation, or switch over functions.Software shall provide fault containment mechanisms to prevent error propagation across replaceable unit interfaces.Hazardous payloads shall provide failure status and data to core software systems. 
Core software systems shall process hazardous payload status and data to provide status monitoring and failure annunciation.Software (including firmware) Power On Self Test (POST) utilized within any replaceable unit or component shall be confined to that single system process controlled by the replaceable unit or component.Software (including firmware) POST utilized within any replaceable unit or component shall terminate in a safe state.Software shall initialize, start, and restart replaceable units to a safe state.For systems solely using software for hazard risk mitigation, software shall require two independent command messages for a commanded system action that could result in a critical or catastrophic hazard.Software shall require two independent operator actions to initiate or terminate a system function that could result in a critical hazard.Software shall require three independent operator actions to initiate or terminate a system function that could result in a catastrophic hazard.Operational software functions shall allow only authorized access.Software shall provide proper sequencing (including timing) of safety critical commands.Software termination shall result in a safe system state.In the event of hardware failure, software faults that lead to system failures, or when the software detects a configuration inconsistent with the current mode of operation, the software shall have the capability to place the system into a safe state.When the software is notified of or detects hardware failures, software faults that lead to system failures, or a configuration inconsistent with the current mode of operation, the software shall notify the crew, ground operators, or the controlling executive.Hazardous processes and safing processes with a time to criticality such that timely human intervention may not be available, shall be automated (i.e., not require crew intervention to begin or complete).The software shall notify crew, ground, or the controlling 
executive during or immediately after execution of an automated hazardous or safing process.Unused or undocumented codes shall be incapable of producing a critical or catastrophic hazard.All safety critical elements (requirements, design elements, code modules, and interfaces) shall be identified as "safety critical."An application software set shall ensure proper configuration of inhibits, interlocks, and safing logic, and exception limits at initialization. E.3 Design for Safety Checklist From a paper given at a talk to the Forth Interest Group (UK) in London during May 1992. Paul E. Bennet Keep the design simple and highly modular. Modularity aids in the isolation of systematic failure modes. Minimize common failure modes. The calculation time for failure probabilities can be extended as by the cube of common mode entries in a fault tree. Identify safe states early in the design. Have these fully checked and verified for completeness and correctness. Ensure that failures of dynamic system activities result in the system achieving a known and clearly identified safe state within a specified time limit. Specify system interfaces clearly and thoroughly. Include, as part of the documentation, the required action or actions should the interface fail. Diagrams convey the most meaning. They can often achieve more than words alone and should be used when presenting design ideas to the customer. Design all systems using the same methodologies framework wherever possible. A well practiced craft helps minimize errors. Paul E. Bennett. 1992, 1999 All rights reserved Paul E. Bennett 1992. Paul E. Bennett can be reached at  HYPERLINK "mailto:PEB@amleth.demon.co.uk" PEB@amleth.demon.co.uk His website is  HYPERLINK "http://www.amleth.demon.co.uk/" http://www.amleth.demon.co.uk/ No liability whatsoever is accepted by Paul E. Bennett for any errors or omission in the presented material as published. 
E.4 Checklist of generic (language independent) programming practices Taken from nuclear standard, appendix B (see  HYPERLINK "http://www.sohar.com/J1030/appb.htm" http://www.sohar.com/J1030/appb.htm), "Review Guidelines on Software languages for Use in Nuclear Power Plant Safety Systems" Final Report-NUREG/CR-6463 Minimize use of dynamic memory. Using dynamic memory can lead to memory leaks. To mitigate the problem, release allocated memory as soon as possible. Also track the allocations and deallocations closely. Minimize memory paging and swapping. In a real-time system, this can cause significant delays in response time. Avoid gotos. Gotos make execution time behavior difficult to fully predict as well as introducing uncertainty into the control flow. When used, clearly document the control flow, the justification for using gotos, and thoroughly test them. Minimize control flow complexity. Excessive complexity makes it difficult to predict the program flow and impedes review and maintenance. Project guidelines or coding standards should set specific limits on nesting levels. Initialize variables before use! Using uninitialized variables can cause anomalous behavior. Using uninitialized pointers can lead to exceptions or core dumps. In larger routines, use single entry and exit points in subprograms. Multiple entry or exit points introduce control flow uncertainties. In small subprograms, multiple exit points may actually make the routine more readable, and should be allowed. Document any secondary entry and exit points. Minimize interface ambiguities. Interface errors account for many design and coding errors. Look at the interfaces to hardware, other software, and to human operators. Use data typing. If the language does not enforce it, include it in the coding standards and look for it during formal inspections. Provide adequate precision and accuracy in calculations, especially within safety critical modules. 
Use parentheses to specify precedence order, rather than relying on the order inherent in the language. Assumptions about precedence often lead to errors, and the source code can be misinterpreted when reviewing it. Avoid functions or procedures with side effects. Side effects can lead to unplanned dependencies, and ultimately to bugs. Separate assignments from evaluation statements. Mixing them can cause unanticipated side effects. An example of a mixed assignment/evaluation statement is: y = toupper(x=getchar()); // x=getchar() should be on separate line Instrumentation (debugging statements, etc) should be highly visible. If left in the run-time system, it should be minimized to avoid timing perturbations. Visibility allows the real code to be obvious when the source code is reviewed, and it makes it easier to be sure all instrumentation is removed for the run-time system. Minimize dynamic binding. Dynamic binding is a necessary part of polymorphism. When used, it should be justified. Keep in mind that it causes unpredictability in name/class association and reduces run-time predictability. Be careful when using operator overloading. While it can help achieve uniformity across different data types (which is good), it can also confuse the reader (and programmers) if used in a non-intuitive way. Use tasking with care. While it adds many good features to programs (e.g. splitting the work into logical units, each of which can be tested independently), it can also lead to timing uncertainties, sequence of execution uncertainties, vulnerability to race conditions, and deadlocks. Minimize the use of interrupt driven processing. Interrupts lead to non-deterministic response times, which is very important in real-time systems. The best way to handle this is to have the interrupt processing do the bare minimum, and return to primary program control as soon as possible. 
Check how the operating system does time slicing (usually using clock interrupts), and what overhead or problems may be inherent in their implementation. Handle exceptions locally, when possible. Local exception handling helps isolate problems more easily and more accurately. If it is not possible to do this, then thorough testing and analysis to verify the softwares behavior during exception testing is recommended. Check input data validity. Checking reduces the probability of incorrect results, which could lead to further errors or even system crashes. If the input can be trusted, then checking is not necessary. Check the output data validity, if downstream input checking is not performed. This reduces incorrect results, which can have mild to major effects on the software. Control the use of built-in functions through project specific guidelines. Built-in functions (usually in the language library) have unknown internal structure, limitations, precision, and exception handling. Thoroughly test the functions that will be used, use a certified compiler, or review formal testing done on the compiler. Create coding standards for naming, indentation, commenting, subprogram size, etc. These factors affect the readability of the source code, and influence how well reviews and inspections can find errors. When doing mixed-language programming, separate out the foreign code, to enhance readability. Also document it well, including the justification. Mixed-language programming should be used only when necessary (such as accessing hardware with C, from a Java program). Use single purpose functions and procedures. This facilitates review and maintenance of the code. Use each variable for a single purpose only. Reusing a variable (usually a local) makes the source code confusing to read and maintain. If the variable is named properly for its original purpose, it will be misnamed for the new purpose. 
If the hardware configuration may change, for this project or in the future, isolate hardware-dependent code. Check for dead code. Unreachable code may indicate an error. It also causes confusion when reading the code. Use version control tools (configuration management). Utilize a bug tracking tool or database. Once a bug is found, it should be tracked until eliminated. Bug databases are also good sources to use when creating checklists for code inspections. Avoid large if-then-else and case statements. Such statements are extremely difficult to debug, because code ends up having so many different paths. The difference between best-case and worst-case execution time becomes significant. Also, the difficulty of structured code coverage testing grows exponentially with the number of branches. Avoid implementing delays as no-ops or empty loops. If this code is used on a different processor, or even the same processor running at a different rate (for example, a 25MHz vs. 33MHz CPU), the code may stop working or work incorrectly on the faster processor. E.5 Checklist of assembly programming practices for safety Use the macro facility of the assembler, if it exists, to simplify the code and make it more readable. Use if/else and loop control of the macro facility. If using labels, make the names meaningful. Label1 is not meaningful. Be careful to check the base (radix) of numbers (decimal, octal, hexadecimal). Use comments to describe WHAT the procedure or section is meant to do. It is not always clear from the assembly code. Update comments when the code changes, if the intent of the procedure or section changes as well. Use named code segments if possible. 
Consider separate segments for reset, non-volatile memory initialization, timer interrupts, and other special-purpose code E.6 Checklist of C programming practices for safety Taken from nuclear standard, appendix B (see  HYPERLINK "http://www.sohar.com/J1030/appb.htm" http://www.sohar.com/J1030/appb.htm) Refer to generic list as well Limit the number and size of parameters passed to routines. Too many parameters affect readability and testability of the routine. Large structures or arrays, if passed by value, can overflow the stack, causing unpredictable results. Always pass large elements via pointers. Use recursive functions with great care. Stack overflows are common. Verify that there is a finite recursion! Utilize functions for boundary checking. Since C does not do this automatically, create routines that perform the same function. Accessing arrays or strings out-of-bounds is a common problem with unpredictable, and often major, results. Do not use the gets function, or related functions. These do not have adequate limit checks. Writing your own routine allows better error handling to be included. Use memmove, not memcpy. Memcpy has problems if the memory overlaps. Create wrappers for built-in functions to include error checking. If an if...else if...else if chain gets beyond two levels, use a switch...case instead. This increases readability. When using switch...case, always explicitly define default. If a break is omitted, to allow flow from one case to another, explicitly document it. Initialize local (automatic) variables. They contain garbage before explicit initialization. Pay special attention to pointers, since they can have the most dangerous effects. Initialize global variables in a separate routine. This ensures that variables are properly set at warm reboot. Check pointers to make sure they don't reference variables outside of scope. Once a variable goes out of scope, what it contains is undefined. Only use setjmp and longjmp for exception handling. 
These commands jump outside function boundaries and deviate from normal control flow. Avoid pointers to functions. These pointers cannot be initialized and may point to non-executable code. If they must be used, document the justification. Prototype all functions and procedures! This allows the compiler to catch errors, rather than having to debug them at run-time. Also, when possible, use a tool or other method to verify that the prototype matches the function. Minimize interface ambiguities, such as using expressions as parameters to subroutines, or changing the order of arguments between similar functions. Also justify (and document) any use of functions with an indefinite number of arguments. These functions cannot be checked by the compiler, and are difficult to verify. Do not use ++ or -- operators on parameters being passed to subroutines or macros. These can create unexpected side effects. Use bit masks instead of bit fields, which are implementation dependent. Always explicitly cast variables. This enforces stronger typing. Casting pointers from one type to another should be justified and documented. Avoid the use of typedefs for unsized arrays. This feature is badly supported and error-prone. Avoid mixing signed and unsigned variables. Use explicit casts when necessary. Don't compare floating point numbers to 0, or expect exact equality. Allow some small differences due to the precision of floating point calculations. Enable and read compiler warnings. If an option, have warnings issued as errors. Warnings indicate deviation that may be fine, but may also indicate a subtle error. Be cautious if using standard library functions in a multitasking environment. Library functions may not be re-entrant, and could lead to unspecified results. Do not call functions within interrupt service routines. If it is necessary to do so, make sure the functions are small and re-entrant. Avoid the use of the ?: operator. The operator makes the code more difficult to read. 
Add comments explaining it, if it is used. Place #include directives at the beginning of a file. This makes it easier to know what files are actually included. When tracing dependencies, this information is needed. Use #define instead of numeric literals. This allows the reader or maintainer to know what the number actually represents (RADIUS_OF_EARTH_IN_KM, instead of 6356.91). It also allows the number to be changed in one place, if a change is necessitated later. Do not make assumptions about the sizes of dependent types, such as int. The size is often platform and compiler dependent. Avoid using reserved words or library function names as variable names. This could lead to serious errors. Also, avoid using names that are close to standard names, to improve the readability of the source code. E.7 Checklist of C++ programming practices for safety Taken from nuclear standard, appendix B (see  HYPERLINK "http://www.sohar.com/J1030/appb.htm" http://www.sohar.com/J1030/appb.htm) Include all C programming practices and generic practices as well Always pass large parameters (structures, arrays, etc.) via reference. If it will not be changed, pass it as const. Group related parameters within a class, to minimize the number of parameters to be passed to a routine. Avoid multiple inheritance, which can cause ambiguities and maintenance problems. When overloading an operator, make sure that its usage is natural, not clever. Obscure overloading can reduce readability and induce errors. Using + to add two structures is natural. Using + with structures to perform a square of each element is not natural. Explicitly define class operators (assignment, etc.). Declare them private if they are not to be used. For all classes, define the following: Default constructor, copy constructor, destructor, operator=. Declare the destructor virtual. This is necessary to avoid problems if the class is inherited. 
Use throw and catch for exception handling, not C's setjmp and longjmp, which are difficult to recover from. Avoid pointers to members. These unnecessarily complicate the code. Use const variables and functions whenever possible. When something should not change, or a function should not change anything outside of itself, use const. E.8 Checklist of Fortran programming practices for safety The following is extracted from Appendix A of Hatton, L. (1992) "Fortran, C or C++ for geophysical software development", Journal of Seismic Exploration, 1, p77-92. Unreachable code. This reduces the readability and therefore maintainability. Unreferenced labels. Confuses readability. The EQUIVALENCE statement except with the project manager's permission. This statement is responsible for many questionable practices in Fortran giving both reliability and readability problems. Permission should not be given lightly. A really brave manager will unequivocally forbid its use. Some programming standards do precisely this. Implicit reliance on SAVE. (This prejudices re-usability). A particularly nasty problem to debug. Some compilers give you SAVE whether you specify it or not. Moving to any machine which implements the ANSI definition from one which SAVE's by default may lead to particularly nasty run and environment sensitive problems. This is an example of a statically detectable error which is almost impossible to find in a source debugger at run-time. The computed GOTO except with the project manager's permission. Often used for efficiency reasons when not justified. Efficiency should never precede clarity as a programming goal. The motto is "tune it when you can read it". Any Hollerith. This is non-ANSI, error-prone and difficult to manipulate. Non-generic intrinsics. Use generic intrinsics only on safety grounds. For example, use REAL() instead of FLOAT(). Use of the ENTRY statement. This statement is responsible for unpredictable behavior in a number of compilers. 
For example, the relationship between dummy arguments specified in the SUBROUTINE or FUNCTION statement and in the ENTRY statements leads to a number of dangerous practices which often defeat even symbolic debuggers. BN and BZ descriptors in FORMAT statements. These reduce the reliability of user input. Mixing the number of array dimensions in calling sequences. Although commonly done, it is poor practice to mix array dimensions and can easily lead to improper access of n-dimensional arrays. It also inhibits any possibility of array-bound checking which may be part of the machine's environment. Unfortunately this practice is very widespread in Fortran code. Use of BLANK='ZERO' in I/O. This degrades the reliability of user input. Putting DO loop variables in COMMON. Forbidden because they can be inadvertently changed or even lead to bugs in some optimizing compilers. Declarations like REAL R(1). An old-fashioned practice which is frequently abused and leads almost immediately to array-bound violations whether planned or not. Array-bound violations are responsible for a significant number of bugs in Fortran. Passing an actual argument more than once in a calling sequence. Causes reliability problems in some compilers especially if one of the arguments is an output argument. A main program without a PROGRAM statement. Use of the program statement allows a programmer to give a module a name avoiding system defaults such as main and potential link clashes. Undeclared variables. Variables must be explicitly declared and their function described with suitable comment. Not declaring variables is actually forbidden in C and C++. The IMPLICIT statement. Implicit declaration is too sweeping unless it is one of the non-standard versions such as IMPLICIT NONE or IMPLICIT UNDEFINED. Labeling any other statement but FORMAT or CONTINUE. 
Stylistically it is poor practice to label executable statements as inserting code may change the logic, for example, if the target of a DO loop is an executable statement. This latter practice is also obsolescent in Fortran 90. The DIMENSION statement. It is redundant and on some machines improperly implemented. Use REAL etc. instead. READ or WRITE statements without an IOSTAT clause. All READ and WRITE statements should have an error status requested and tested for error-occurrence. SAVE in a main program. It merely clutters and achieves nothing. All referenced subroutines or functions must be declared as EXTERNAL. All EXTERNALS must be used. Unless EXTERNAL is used, names can collide surprisingly often with compiler supplied non-standard intrinsics with strange results which are difficult to detect. Unused EXTERNALS cause link problems with some machines, leading to spurious unresolved external references. Blank COMMON. Use of blank COMMON can conflict with 3rd. party packages which also use it in many strange ways. Also the rules are different for blank COMMON than for named COMMON. Named COMMON except with the project manager's permission. COMMON is a dangerous statement. It is contrary to modern information hiding techniques and used freely, can rapidly destroy the maintainability of a package. The author has bitter, personal experience of this ! Some company's safety-critical standards for Fortran explicitly forbid its use. Use of BACKSPACE, ENDFILE, REWIND, OPEN, INQUIRE and CLOSE. Existing routines for each of these actions should be designed and must always be used. Many portability problems arise from their explicit use, for example, the position of the file after an OPEN is not defined. It could be at the beginning or the end of the file. The OPEN should always therefore be followed by a REWIND, which has no effect if the file is already positioned at the beginning. OPEN and INQUIRE cause many portability problems, especially with sequential files. 
DO loops using non-INTEGER variables. The loop may execute a different number of times on some machines due to round-off differences. This practice is obsolescent in Fortran 90. Logical comparison of non-INTEGERS. Existing routines for this should be designed which understand the granularity of the floating point arithmetic on each machine to which they are ported and must always be used. Many portability problems arise from its explicit use. The author has personal experience whereby a single comparison of two reals for inequality executed occasionally in a 70,000 line program caused a very expensive portability problem. Any initialization of COMMON variables or dummy arguments is forbidden inside a FUNCTION, (possibility of side-effects). Expression evaluation order is not defined for Fortran. If an expression contains a function which affects other variables in the expression, the answer may be different on different machines. Such problems are exceedingly difficult to debug. Use of explicit unit numbers in I/O statements. Existing routines to manipulate these should be designed and must always be used. Many portability problems arise from their explicit use. The ANSI standard only requires them to be non-negative. What they are connected to differs wildly from machine to machine. Don't be surprised if your output comes out on a FAX machine ! CHARACTER*(N) where N>255. A number of compilers do not support character elements longer than 255 characters. FORMAT repeat counts > 255. A number of compilers do not support FORMAT repeat counts of more than 255. COMMON blocks called EXIT. On one or two machines, this can cause a program to halt unexpectedly. Comparison of strings by other than the LLE functions. Only a restricted collating sequence is defined by the ANSI standard. The above functions guarantee portability of comparison. Using the same character variable on both sides of an assignment. 
If character positions overlap, this is actually forbidden by the standard but some compilers allow it and others don't. It should simply be avoided. The restriction has been removed in Fortran 90. Tab to a continuation line. Tabs are not part of the ANSI Fortran definition. They are however easily removable if used only to code lines and for indentation. If they are also used for continuation (like the VAX for example), it means they become syntactic and if your compiler does not support them, removing them is non-trivial. Use of PAUSE. An obsolescent feature with essentially undefined behavior. Use of '/' or '!' in a string initialized by DATA. Some compilers have actually complained at this ! Using variables in PARAMETER, COMMON or array dimensions without typing them explicitly before such use. e.g. PARAMETER (R=3) INTEGER R Some compilers get it wrong. Use of CHAR or ICHAR. These depend on the character set of the host. Best to map onto ASCII using wrapper functions, but almost always safe today. Use of ASSIGN or assigned GOTO. An obsolescent feature legendary for producing unreadable code. Use of Arithmetic IF. An obsolescent feature legendary for producing unreadable code. Non-CONTINUE DO termination. An obsolescent feature which makes enhancement more difficult. Shared DO termination for nested DO loops. An obsolescent feature which makes enhancement more difficult. Alternate RETURN. An obsolescent feature which can easily produce unreadable code. Use of Fortran keywords or intrinsic names as identifier names. Keywords may be reserved in future Fortran standards. The practice also confuses readability, for example, IF (IF(CALL)) STOP=2 Some people delight in this sort of thing. Such people do not take programming seriously. Use of the INTRINSIC statement. The ANSI standard is particularly complex for this statement with many exceptions. Avoid. Use of END= or ERR= in I/O statements. (IOSTAT should be used instead). 
Using END= and ERR= with associated jumps leads to unstructured and therefore less readable code. Declaring and not using variables. This just confuses readability and therefore maintainability. Using COMMON block names as general identifiers, where use of COMMON has been approved. This practice confuses readability and unfortunately, compilers from time to time. Using variables without initializing them. Reliance on the machine to zero memory for you before running is not portable. It also produces unreliable effects if character strings are initialized to zero, (rather than blank). Always initialize variables explicitly. Use of manufacturer specific utilities unless specifically approved by the project manager. This simply reduces portability, in some cases pathologically. Use of non-significant blanks or continuation lines within user-supplied identifiers. This leads both to poor readability and to a certain class of error when lists are parsed, (it may have been a missing comma). Use of continuation lines in strings. It is not clear if blank-padding to the end of each partial line is required or not. Passing COMMON block variables through COMMON and through a calling sequence. This practice is both illegal and unsafe as it may confuse optimizing compilers and in some compilers simply not work. It is a very common error. An IF..ELSEIF block IF with no ELSE. This produces a logically incomplete structure whose behavior may change if the external environment changes. A frequent source of "unexpected software functionality". DATA statements within subroutines or functions. These can lead to non-reusability and therefore higher maintenance and development costs. If constants are to be initialized, use PARAMETER. DO loop variables passed as dummy arguments. Equivalencing any arrays other than at their base, even if use of EQUIVALENCE has been approved. Some machines still have alignment problems and also modern RISC platforms rely on good alignment for efficiency. 
So at best, it will be slow and at worst, it will be wrong. Equivalencing any variable with COMMON, even if use of EQUIVALENCE and COMMON has been approved. This rapidly leads to unreadable code. Type conversions using the default rules, either in DATA or assignment statements. Type conversions should be performed by the programmer - state what you mean. For example: R = I Wrong R = REAL(I) Right Use of mixed-type arithmetic in expressions. Use of precedence in any kind of expression. Parenthesize to show what you mean. Although Fortran precedence is relatively simple compared with C which has 15 levels of precedence, it is still easy to get it wrong. Concatenated exponentiation without parenthesizing, e.g. a**b**c. People too often forget what this means. Exponentiation associates from the right. Calling sequence matching. Make sure that calling sequence arguments match in type number and direction. Inconsistencies here are responsible for many unreliability problems in Fortran. E.9 Checklist of Pascal programming practices for safety Taken from nuclear standard, appendix B (see  HYPERLINK "http://www.sohar.com/J1030/appb.htm" http://www.sohar.com/J1030/appb.htm) Refer to generic list as well If using pointers, use handles whenever possible. Handles allow the memory management to recapture and compact free memory. Use care with multiple-condition flow statements. The order of evaluation cannot be guaranteed. Isolate interrupt receiving tasks into implementation dependent packages. Pass the interrupt to the main tasks via a normal entry. Interrupt entries are implementation dependent features that may not be supported. Use symbolic constants instead of numeric literals. This increases readability and maintainability. Avoid the use of the mod operator. Not all compilers follow the Standard, and this will create portability problems. E.10 Checklist for Visual Basic From An Evaluation of Object-Based Programming with Visual Basic, James M. Dukovic and Daniel T. 
Joyce, 0-7803-2492-7/95 IEEE If you want to add a public function or subroutine to a form, place it in a code module having the same name as the form. Place private functions and subroutines inside the general procedure section of the form. This is required because general procedures inside forms are not visible to other objects. Do not use global variables. Use procedure-level variables for temporary variables and module-level variables for object data. This will require you to pass parameters to all methods ensuring a cleaner interface. Do not use the static statement to declare variables or procedures. Use module-level variables for all object data. Static variables can become lost in your code. Create handles to access properties declared in code modules. You may access properties in forms and controls directly. This will help hide the implementation of the object. Code modules should be objects. They should have data and methods (subroutines and functions) to access and manipulate the data. Use object-oriented design techniques to define your objects. Forms should only contain code that is unique to the form. Most code should be placed in modules. Forms are likely to change. Modules are much more stable. Set the Visual Basic Environment Option called Require Variable Declaration to Yes. This will force you to declare variables explicitly. It does not, however, force you to specify a type. Explicitly declare data types for all variables and functions. This will improve readability and reduce the risk of data type errors. Hide the implementation of an object as much as possible. The object's interface should reveal as little about the implementation and underlying data structure as possible. This will allow you to make changes to an object without impacting other objects. Avoid using environment specific parameters in the object's interface. For example, small, medium or large is preferable to passing pixels or twips. Use the variant data type sparingly. 
Although it allows a form of parametric polymorphism, the resultant code can be difficult to understand and its use will increase the risk of data type errors. Declare subroutines and functions as private whenever possible. Subroutines and functions should only be public if they are used by other objects. This will make the code more readable and it will prevent other objects from accessing private methods directly. Document your interfaces at the top of each method. Include variable types, sizes, and allowable values. Use standard objects whenever possible. For example, use message boxes and dialogue boxes instead of creating specific forms. Do not use the OptionBase statement to alter the lower bound of array subscripts. Altering the lower bound may make reuse more difficult. Design your objects with weak coupling. That is, create your object so its dependency on other objects is minimal. This will make it easier to understand your objects. E.11 Checklist for selecting an RTOS From Selecting a Real-Time Operating System, Greg Hawley, Embedded Systems Programming, March, 1999 CriteriaConsiderationsLanguage/Microprocessor SupportThe first step in finding an RTOS for your project is to look at those vendors supporting the language and microprocessor youll be using.Tool CompatibilityMake sure your RTOS works with your ICE, compiler, assembler, linker, and source code debuggers.ServicesOperating systems provide a variety of services. Make sure your OS supports the services (queues, times, semaphores) you expect to use in your design.FootprintRTOSes are often scalable, including only those services you end up needing in your applications. Based on what services youll need, and the number of tasks, semaphores, and everything else you expect to use, make sure your RTOS will work in the RAM and ROM you havePerformanceCan your RTOS meet your performance requirements? 
Make sure you understand benchmarks vendors give you and how they apply to the hardware you will really be using.Software ComponentsAre required components (protocol stacks, communications services, real-time databases, Web services, virtual machines, graphics libraries, and so on) available for your RTOS? How much effort will it be to integrate them?Device DriversIf youre using common hardware, are device drivers available for your RTOS?Debugging ToolsRTOS vendors may have debugging tools that help find defects that are harder to find with source-level debuggers (such as deadlocks, forgotten semaphore puts, and so on).Standards CompatibilityAre there safety or compatibility standards your application demands? Make sure your RTOS complies.Technical SupportPhone support is typically covered for a limited time after your purchase or on a year-to-year basis through support. Sometimes applications engineers are available. Additionally, some vendors provide training and consulting.Source vs. Object Code With some RTOSes you get the source code to the operating system when you buy a license. In other cases, you get only object code or linkable libraries.LicensingMake sure you understand how the RTOS vendor licenses their RTOS. With some vendors, run-time licenses are required for each board shipped and development tool licenses are required for each developer.ReputationMake sure youre dealing with someone youll be happy with.ServicesReal-time operating systems provide developers a full complement of features: several types of semaphores (counting, mutual exclusion), times, mailboxes, queues, buffer managers, memory system managers, events, and more.Priority InheritanceMust support, or priority inversion can result E.12 Good Programming Practices Checklist These items should be considered when creating a Coding Standard or when beginning a software project. Programming PracticeYes/No/NAComment or JustificationGeneral SuggestionsCPU self test. 
Test the CPU on boot up.Fill ROM/RAM/flash with a known pattern (halt, illegal instruction, return) to guard against illegal jumps.ROM tests. Verify integrity of ROM (EEPROM, Flash disk, etc.) prior to executing the software stored in it. Watchdog Timers. Implement a watchdog timer to reboot software if it gets stuck.Guard against Variable Corruption. Store multiple copies of critical variables, especially on different storage media or physically separate memory.Stack Checks. Checking the stack guards against stack overflow or corruption. By initializing the stack to a known pattern, a stack monitor function can be used to watch the amount of available stack space. after the code is written.Write what you need, and use what you write! Dont make unnecessarily verbose or lengthy documentation, unless contractually required. It is better to have short documents that the developers will actually read and use.Initialize all unused memory locations to a pattern that, if executed as an instruction, will cause the system to revert to a known safe state.Dont use a stop or halt instruction. The CPU should be always executing, whether idling or actively processingWhen possible, put safety-critical operational software instructions in nonvolatile read-only memory.Dont use scratch files for storing or transferring safety-critical information between computers or tasks within a computer.Keep Interface Control Documents up to date. Out-of-date information usually leads to one programmer creating a module or unit that will not interface correctly with another unit. The problem isnt found until late in the testing phase, when it is expensive to fix. Prohibit program patches. During development, patching a program is a bad idea. During operations, patching may be a necessity, but should still be carefully considered.Follow the two person rule. At least two people should be thoroughly familiar with the design, code, testing and operation of each software module of the system. 
If one person leaves the project, someone else understands what is going on.Design IssuesProgram Calculation Checks. Simple checks can be used to give confidence in the results from calculations. Verify all reused code was designed for reuse. Do not implement program as One big loop. When real-time software is designed as a single big loop, we have no flexibility to modify the execution time of various parts of the code independently. Few real-time systems need to operate everything at the same rate.A single large loop forces all parts of the software to operate at the same rate. Analyze hardware peculiarities before starting software design. Avoid inter-module dependencies when possible. This maximizes software reusability.Create more than a single design diagram. Getting the entire design on paper is essential.Design in error detection and handling! Tailor the effort to the level of the code don't put it everywhere! Perform a memory analysis of the design. Estimate how much memory your system uses and adjust the design if the system is bumping up against its limits. Avoid indiscriminate use of interrupts. Use of interrupts can cause priority inversion in real-time systems if not implemented carefully. Interrupts are perhaps the biggest cause of priority inversion in real-time systems, causing the system to not meet all of its timing requirements. The reason for this delay is that interrupts preempt everything else and aren't scheduled. Use come-from checks. For safety critical modules, make sure that the correct module called it, and that it was not called accidentally by a malfunctioning module.Provide separate authorization and separate control functions to initiate a critical or hazardous function. 
This includes separate arm and fire commands for critical capabilities.Do not use input/output ports for both critical and non-critical functions.Provide sufficient difference in addresses between critical I/O ports and non-critical I/O ports, such that a single address bit failure does not allow access to critical functions or ports.Make sure all interrupt priorities and responses are defined. All interrupts should be initialized to a return, if not used by the software.Provide for an orderly shutdown (or other acceptable response) upon the detection of unsafe conditions. Provide for an orderly system shutdown as the result of a command shutdown, power interruptions, or other failures. Protect against out-of-sequence transmission of safety-critical function messages by detecting and deviation from the normal sequence of transmission. Revert to a known safe state when out-of-sequence messages are detected.Hazardous sequences should not be initiated by a single keyboard entry.Prevent inadvertent entry into a critical routine. Detect such entry if it occurs, and revert to a known safe state.When safety interlocks are removed/bypassed for a test, the software should verify the reinstatement of the interlocks at the completion of the testing.Critical data communicated from one CPU to another should be verified prior to operational use.Set a dedicated status flag that is updated between each step of a hazardous operation. Verify critical commands prior to transmission, and upon reception. It never hurts to check twice!Make sure all flags used are unique and single purpose.Put the majority of safety-critical decisions and algorithms in a single (or few) software development module(s).Decision logic using data from hardware or other software modules should not be based on values of all ones or all zeros. 
Use specific binary patterns to reduce the likelihood of malfunctioning hardware/software satisfying the decision logic.Perform reasonableness checks on all safety-critical inputs.Perform a status check of critical system elements prior to executing a potentially hazardous sequence.Always initialize the software into a known safe state. This implies making sure all variables are set to an initial value, and not the previous value prior to reset.Don't allow the operator to change safety-critical time limits in decision logic.When the system is safed, usually in response to an anomalous condition or problem, provide the current system configuration to the operator.Create a list of possible hardware failures that may impact the software. The list will be invaluable when testing the error handling capabilities of the software, as well as making sure hardware failures have been considered in the design.Be careful if using multi-threaded programs. Subtle program errors can result from unforeseen interactions among multiple threads. Consider the stability of the requirements. If the requirements are likely to change, design as much flexibility as possible into the system.Design for weak coupling between modules (classes, etc.). The more independent the modules are, the less you can screw them up later in the process. Reduce complexity. Calculate a complexity metric. Look at modules that are very complex and reduce them if possible. Implementation (Coding) IssuesDo not implement delays as empty loops. This can create problems (and timing difficulties) if the code is run on faster or slower machines, or even if recompiled with a newer, optimizing compiler.Avoid fine-grain optimizing during first implementation.Check variables for reasonableness before use. If the value is out of range, there is a problem: memory corruption, incorrect calculation, hardware problems (if sensor), etc.Use readbacks to check values. 
When a value is written to memory, the display, or hardware, another function should read it back and verify that the correct value was written.Safety-critical modules should have only one entry and one exit point.Create a dependency graph. Given such a diagram, its easy to identify what parts of the software can be reused, create a strategy for incremental testing of modules, and develop a method to limit error propagation through the entire system.Consider compiler optimization carefully. Debuggers may not work well with optimized code. Testing IssuesPlan and script all tests. Do not rely on interactive and incomplete test programs. Measure the execution time of your code. Determine if there are any bottlenecks, or any modules that should be considered for optimization.Use execution logging, with independent checking, to find software runaway, illegal functions, or out-of-sequence execution. Test for memory leakage. Instrument the code and run it under load and stress tests. Use a simulator or ICE (In-circuit Emulator) system for debugging in embedded systems.  
E.13 Software Requirements Phase Checklist Project:Safety Effort Level:Phase: Software RequirementsSoftware Safety Analyst:TechniquePerform?Justification for not performingPreliminary Hazard Analysis (PHA) Section 2.4Software Safety Requirements Flow-down Analysis Section 5.1.1Checklists and cross references Section 5.1.1.1Requirements Criticality Analysis Section 5.1.2Generic Software Safety Requirements Section 4.2.2Specification Analysis Section 5.1.3Formal Methods - Specification Development Section 4.2.3 Formal Inspections of Specifications Section 4.2.5Timing, Throughput And Sizing Analysis Section 5.1.5Software Fault Tree Analysis Section 5.1.6, Appendix B E.14 Architectural Design Phase Checklist Project:Safety Effort Level:Phase: Architectural Design PhaseSoftware Safety Analyst:TechniquePerform?Justification for not performingCOTS & Software Reuse Considerations Sections 4.3.3 and 7.1Selection of Programming Language, Environment, Tools, and Operating System Section 4.3.4Coding Standards Section 4.3.5Update Criticality Analysis Section 5.2.1Conduct Hazard Risk Assessment Section 5.2.2Analyze Architectural Design Section 5.2.3Interdependence Analysis Section 5.2.4.1Independence Analysis Section 5.2.4.2Update Timing/Throughput/Sizing Analysis Section 5.2.5Update Software Fault Tree Analysis Section 5.2.6Formal Inspections of Architectural Design Products Section 5.2.7Formal Methods and Model Checking Section 5.2.8  E.15 Detailed Design Phase Checklist Project:Safety Effort Level:Phase: Detailed Design PhaseSoftware Safety Analyst:TechniquePerform?Justification for not performingModel Checking Section 4.2.4Data Logic Analysis Section 5.3.1Design Data Analysis Section 5.3.2Design Interface Analysis Section 5.3.3Design Constraint Analysis Section 5.3.4Rate Monotonic Analysis Section 5.3.5Dynamic Flowgraph Analysis Section 5.3.6Markov Modeling Section 5.3.7Measurement of Complexity Section 5.3.8Selection of Programming languages Section 5.3.9Formal Methods and 
Model Checking Section 5.3.10Requirements State Machines Section 5.3.11, Appendix DFormal Inspections of Detailed Design Products Section 5.3.12Software Failure Modes and Effects Analysis Section 5.3.13, Appendix CUpdates to Previous Analyses (SFTA, Timing, Criticality, etc.) Section 5.3.14 E.16 Implementation Phase Checklist Project:Safety Effort Level:Phase: Software Implementation PhaseSoftware Safety Analyst:TechniquePerform?Justification for not performingCoding Checklists Section 4.5.1Defensive Programming Section 4.5.2Refactoring Section 4.5.3Code Logic Analysis Section 5.4.1Code Data Analysis Section 5.4.2Code Interface Analysis Section 5.4.3Update Measurement of Complexity Section 5.4.4Update Design Constraint Analysis Section 5.4.5Formal Code Inspections, Checklists, and Coding Standards Section 5.4.6Formal Methods Section 5.4.7Unused Code Analysis Section 5.4.8Interrupt Analysis Section 5.4.9Final Timing, Throughput, and Sizing Analysis Section 5.4.10Program Slicing Section 5.4.11Update Software Failure Modes and Effects Analysis Section 5.4.12 E.17 Software Testing Phase Checklist Project:Safety Effort Level:Phase: Software Testing PhaseSoftware Safety Analyst:TechniquePerform?Justification for not performingUnit Level Testing Section 4.5.4Integration Testing Section 4.6.3System and Functional Testing Section 4.6.5Software Safety Testing Section 4.6.6Test Coverage Analysis Section 5.5.1Formal Inspections of Test Plan and Procedures Section 5.5.2Reliability Modeling Section 5.5.3Checklists of Tests Section 5.5.4Test Results Analysis Section 5.5.5Independent Verification and Validation Section 5.5.6 E.18 Dynamic Testing Checklist Project:Safety Effort Level:Phase: Dynamic Testing (Unit or Integration Level)Software Safety Analyst:TechniquePerform?Justification for not performingTypical sets of sensor inputsTest specific functionsVolumetric and statistical testsTest extreme values of inputsTest all modes of each sensorEvery statement executed onceEvery 
branch tested at least onceEvery predicate term testedEvery loop executed 0, 1, many, max-1, max, max+1 timesEvery path executedEvery assignment to memory testedEvery reference to memory testedAll mappings from inputs checkedAll timing constraints verifiedTest worst case interrupt sequencesTest significant chains of interruptsTest Positioning of data in I/O spaceCheck accuracy of arithmeticAll modules executed at least onceAll invocations of modules tested E.19 Software System Testing Checklist Project:Safety Effort Level:Phase: Software System TestingSoftware Safety Analyst:TechniquePerform?Justification for not performingSimulation (Test Environment)Load TestingStress TestingBoundary Value TestsTest Coverage AnalysisFunctional TestingPerformance MonitoringDisaster TestingResistance to Failure TestingRed Team TestingFormal Progress ReviewsReliability ModelingChecklists of Tests  NASA NPG 8730 Software Independent Verification and Validation (IV&V) Management details the criteria for determining if a project requires IV&V or Independent Assessment (IA). This NPG should be followed by all NASA projects when establishing the level of IV&V or IA required. PAGE viii NASA-GB-1740.13-96  PAGE vi NASA-GB-1740.13 PAGE  PAGE 281 E- NASA-GB-?? PAGE 315 NASA-GB-?? PAGE  KEEP ON GOING UNTIL COMPLETE! Step 1: Gather information on the system hardware and software. Step 2: Perform a Preliminary Hazard Analysis (PHA) a general type of assessment. Step 3: Break system down into subsystems. Step 4: Perform a Subsystem Hazard Analysis (SSHA) more specific to the subsystem. Step 5: Investigate hazards associated with the interfaces. Step 6: Perform a System level Hazard Analysis (SHA) another type of specific assessment. System Safety Software Developers System Developers Software Safety LINES OF COMMUNICATION IT IS VERY IMPORTANT THAT THESE LINES ARE ESTABLISHED WARNING: Requirements are subject to change as the system design progresses! 
Often items that were assigned to hardware are determined to be better (or more cheaply) developed in software. Some of those items may be safety critical. As system elements are redistributed, it is vital to revisit the software safety effort determination. If the new requirements lead to software controlling hazardous hardware, then more effort needs to be applied to the software safety program. NOTE: The Software Hazard Risk Index is NOT the same as the System Risk Index, though the two may appear similar. The difference is mainly that the System Risk Index of 1 (prohibited) has already been eliminated. The 1s that are shown in the top row of the Software Hazard Criticality Matrix (Table 3-3) are not the same as the 1s in the System Risk matrix However, even if a project should decide it does not intend to employ software controls of safety-critical system functions, some software safety tasks may still be necessary. Whenever possible, select for safety. Otherwise, take steps to mitigate the risks to safety. Definitions Off-the-shelf (OTS) Software not developed in-house or by a contractor for the project. The software is general purpose, or developed for a different purpose from the current project. COTS Commercial-off-the-shelf software. Operating systems, libraries, applications, and other software purchased from a commercial vendor. Not customized for your project. Source code and documentation are often limited. GOTS Government-off-the-shelf software. This was developed in-house, but for a different project. Source code is usually available. Documentation varies. Analyses and test results, including hazard analyses, may be available. Reused software Software developed by the current team (or GOTS) for a different project, portions of which are reused in the current software. While it is tempting to pull out a previously written function for the new project, be aware of how it will operate in the new system. 
Just because it worked fine in System A does not mean it will work OK in System B. A suitability analysis should be performed. Contracted software Software created for a project by a contractor or sub-contractor. The project defines the requirements the software must meet. Process requirements and safety analyses may be included. This is custom-made software, but not in-house. Glueware Software created to connect the OTS/reused software with the rest of the system. It may take the form of adapters that modify interfaces or add missing functionality, firewalls that isolate the OTS software, or wrappers that check inputs and outputs to the OTS software and may modify either to prevent failures. WARNING: Glueware Needed! Extra Work Ahead! Think before you reuse! Dont assume that if the software works properly, with no hazard potentials or problems, in the old environment it will work properly in the new system. Formal inspection of the requirements/specification by the NASA customer should be used to ensure that the specification is complete and unambiguous. Remember: Software which tests safety critical code must also be considered safety critical. At the end of a lifecycle activity or phase, it is important to verify that All system safety requirements have been satisfied by this lifecycle phase. No additional hazards have been introduced by the work done during this lifecycle phase. IEEE 1228-1994 Regression testing should include functional, performance, stress, and safety testing of the altered code and all modules it interacts with. Any software item identified as a potential hazard cause, control, or mitigation, whether controlled by hardware, software or human operator, is designated as safety critical, and subjected to rigorous software quality control, analysis, and testing. It is also traced through the software safety analysis process until the final verification. 
A structured development environment and an organization using state of the art methods are prerequisites to developing dependable safety critical software.  I National Aeronautics and Space Administration !"#'/56>    ) ͽybQyy hfS5CJOJQJmHnHu,jhfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHujhfS0JUmHnHu hfSCJjhfSCJUhfS5B*CJOJQJphhfS5CJOJQJ hfS5hfS hfS5CJ$ hfS5CJ( hfS5CJ0jhfSUmHnHuhfS5CJ0OJQJ"#$%&'()*+,-./>?F 0 } #   X X$xa$x$xa$RC oD E W ) * + - . / 0 1 2 N O P Q T U Z [ \ v w x z { | } ~  ȷȬȬ|k|ȬTȬ|,jhfS>*B*UmHnHphu jwhfSUmHnHuhfS56OJQJmHnHu,jhfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHu hfS5CJOJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu j}hfSUmHnHu      ! " # $ % A B C D G H \ ] ^ ݳ{ݳd,jhfS>*B*UmHnHphu jkhfSUmHnHu,jhfS>*B*UmHnHphuhfS56OJQJmHnHujhfS0JUmHnHuhYR+mHnHu jqhfSUmHnHujhfSUmHnHuhfSmHnHuhfS0JmHnHu'^ x y z | } ~    ׾{׾d,jhfS>*B*UmHnHphu j_hfSUmHnHu,jhfS>*B*UmHnHphuhfS0JmHnHuhfS56OJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu jehfSUmHnHuhfSmHnHu   / 0 1 K L M O P Q R S T p q r s v w ۼͳͣ䌣|kͳͣ| jShfSUmHnHuhfS56OJQJmHnHu,jhfS>*B*UmHnHphujhfS0JUmHnHuhYR+mHnHu jYhfSUmHnHujhfSUmHnHuhfSmHnHuhfS0JmHnHu hfS5CJOJQJmHnHu! 
R  | OzNZ=l;d @ x X X       / 0 1 2 7 8 Y Z [ u v w y z { | } ~ ξεپεviεXi jG hfSUmHnHuhfSOJQJmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu jMhfSUmHnHujhfSUmHnHuhfSmHnHuhfS56OJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jhfS>*B*UmHnHphu"~  ,-.HIJLMNOPQmnķxgķ j; hfSUmHnHu,j hfS>*B*UmHnHphuhYR+mHnHu jA hfSUmHnHujhfSUmHnHuhfSOJQJmHnHujhfS0JUmHnHu,j hfS>*B*UmHnHphuhfS0JmHnHuhfSmHnHu&nopuv78θθyθhθ j/ hfSUmHnHu,j hfS>*B*UmHnHphuhYR+mHnHu j5 hfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,j hfS>*B*UmHnHphu$89:=>WXYstuwxyz{|ξεپεviεXi j#hfSUmHnHuhfSOJQJmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu j)hfSUmHnHujhfSUmHnHuhfSmHnHuhfS56OJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,j hfS>*B*UmHnHphu"  +,-GHIKLMNOPlmnotuĴuhWh jhfSUmHnHuhfSOJQJmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfS56OJQJmHnHujhfS0JUmHnHu,jhfS>*B*UmHnHphuhfS0JmHnHuhfSmHnHu"!"#$+,789STUWXYZ[\xyܨܗwܨf j hfSUmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSCJOJQJmHnHu,jhfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHujhfS0JUmHnHu(yz{678:;<=>?οζٿζwjζYj jhfSUmHnHuhfSOJQJmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHuhfSCJOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jhfS>*B*UmHnHphu"?[\]^cd}~ !ķxgķ jhfSUmHnHu,jvhfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSOJQJmHnHujhfS0JUmHnHu,j|hfS>*B*UmHnHphuhfS0JmHnHuhfSmHnHu&!"#&'IJKefgijklmnξεپεveεTe jhfSUmHnHu hfS5CJOJQJmHnHu,jjhfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHuhfS56OJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jphfS>*B*UmHnHphu 45689:;<=YZ[\_`ܧܖvܧe jhfSUmHnHu,j^hfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfS56OJQJmHnHu,jdhfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHujhfS0JUmHnHu'5678=>lmnxg jhfSUmHnHu,jRhfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHujhfS0JUmHnHu,jXhfS>*B*UmHnHphuhfS0JmHnHu%ABC]^_abcdefοζٿζwοζfٿζ jhfSUmHnHu,jFhfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHuhfSCJOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jLhfS>*B*UmHnHphu$"#$&'()*+GHοζٿζwοζfٿζ j hfSUmHnHu,j: hfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHuhfSCJOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,j@hfS>*B*UmHnHphu$d)cYC`4!K Y ! 
x X X  @HIJQRfghοζٿζwοζfٿζ j"hfSUmHnHu,j."hfS>*B*UmHnHphuhYR+mHnHu j!hfSUmHnHujhfSUmHnHuhfSmHnHuhfSCJOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,j4!hfS>*B*UmHnHphu$@AB\]^`abcdeοζٿζwοζfٿζ j$hfSUmHnHu,j"$hfS>*B*UmHnHphuhYR+mHnHu j#hfSUmHnHujhfSUmHnHuhfSmHnHuhfSCJOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,j(#hfS>*B*UmHnHphu$678RSTVWXYZ[wxyz}~οζٿζwgζVg j&hfSUmHnHuhfS56OJQJmHnHu,j&hfS>*B*UmHnHphuhYR+mHnHu j%hfSUmHnHujhfSUmHnHuhfSmHnHuhfSCJOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,j%hfS>*B*UmHnHphu! !"<=>@ABCDEabcdgh딥ijtdS j(hfSUmHnHuhfS56OJQJmHnHu,j (hfS>*B*UmHnHphuhYR+mHnHu j'hfSUmHnHujhfSUmHnHu hfS5CJOJQJmHnHujhfS0JUmHnHu,j'hfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHu   12349:efgǾǧǾǾviǾX j{*hfSUmHnHuhfSOJQJmHnHu,j)hfS>*B*UmHnHphuhYR+mHnHu j)hfSUmHnHu,j)hfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfS56OJQJmHnHujhfS0JUmHnHujhfSUmHnHu"=>?Yْj,j+hfS>*B*UmHnHphu ju+hfSUmHnHuhfSCJOJQJmHnHu,j*hfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfSOJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu#YZ[]^_`ab~ȻȰȰpȁȰYȰ,j-hfS>*B*UmHnHphu ji-hfSUmHnHuhfSCJOJQJmHnHu,j,hfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfSOJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu jo,hfSUmHnHu"-./123456RSTU\]򿰿饎p򿰿Y,j/hfS>*B*UmHnHphu j]/hfSUmHnHuhfS0J@mHnHu,j.hfS>*B*UmHnHphuhfS0JmHnHuhfSCJOJQJmHnHujhfS0JUmHnHuhYR+mHnHu jc.hfSUmHnHuhfSmHnHujhfSUmHnHu! !"#?@ABIJklm޿жЦ珦oжЦX,j1hfS>*B*UmHnHphu jQ1hfSUmHnHuhfSCJOJQJmHnHu,j0hfS>*B*UmHnHphujhfS0JUmHnHuhYR+mHnHu jW0hfSUmHnHujhfSUmHnHuhfSmHnHuhfS0JmHnHuhfSOJQJmHnHu"()*DEFHIJKLMijͿͮͿp jE3hfSUmHnHuhfSOJQJmHnHu,j2hfS>*B*UmHnHphuhYR+mHnHu jK2hfSUmHnHujhfSUmHnHuhfSmHnHuhfSCJOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu#jkltu     ! οζٿζwοζfٿζ j95hfSUmHnHu,j4hfS>*B*UmHnHphuhYR+mHnHu j?4hfSUmHnHujhfSUmHnHuhfSmHnHuhfSCJOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,j3hfS>*B*UmHnHphu$! " # + , 6 7 8 R S T V W X Y Z [ w x y z  οζٿζwjζYj j-7hfSUmHnHuhfSOJQJmHnHu,j6hfS>*B*UmHnHphuhYR+mHnHu j36hfSUmHnHujhfSUmHnHuhfSmHnHuhfSCJOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,j5hfS>*B*UmHnHphu" !!!!!!!! 
!!?!B!C!]!^!_!y!ķxk[khfS56OJQJmHnHuhfS0J@mHnHu,j8hfS>*B*UmHnHphuhYR+mHnHu j'8hfSUmHnHujhfSUmHnHuhfSOJQJmHnHujhfS0JUmHnHu,j7hfS>*B*UmHnHphuhfS0JmHnHuhfSmHnHuy!z!{!}!~!!!!!!!!!!!!!!!!!!!!!!!"""""ȸȭȭoȀȭXȭ,j:hfS>*B*UmHnHphu j:hfSUmHnHuhfSOJQJmHnHu,j9hfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfS56OJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu j!9hfSUmHnHu!!!X""###]$$@%%%Q&& 'j''(((D))*Z**+p+ @ x X""5"6"7"Q"R"S"U"V"W"X"Y"Z"v"w"x"y""""""""""""""""""""""#ܽδΤ卤|δΤe,j<hfS>*B*UmHnHphu j<hfSUmHnHu,j;hfS>*B*UmHnHphujhfS0JUmHnHuhYR+mHnHu j;hfSUmHnHujhfSUmHnHuhfSmHnHuhfS0JmHnHuhfSCJOJQJmHnHu'#########:#;#<#=#B#C#_#`#a#{#|#}##########ȹȮȁtctȮL,j>hfS>*B*UmHnHphu j>hfSUmHnHuhfSOJQJmHnHuhfS0J@mHnHu,j=hfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfSCJOJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu j =hfSUmHnHu################$$$$$$:$;$<$V$W$X$Z$[$\$]$^$_${$|$}$~$ͿͮͿ}f,jt@hfS>*B*UmHnHphu j?hfSUmHnHu,jz?hfS>*B*UmHnHphuhYR+mHnHu j>hfSUmHnHujhfSUmHnHuhfSmHnHuhfSCJOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu%~$$$$$$$$$$$$$$$$$$$$$%%%9%:%;%=%>%?%@%A%B%^%_%`%ܽδΤ夙ܙqδΤ夙ܙZ,jhBhfS>*B*UmHnHphu jAhfSUmHnHu,jnAhfS>*B*UmHnHphuhfS0JmHnHujhfS0JUmHnHuhYR+mHnHu j@hfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0J@mHnHu#`%a%f%g%w%x%y%%%%%%%%%%%%%%%%%%%%%%%%%%%%&&&&ϰh,j\DhfS>*B*UmHnHphu jChfSUmHnHu,jbChfS>*B*UmHnHphuhYR+mHnHu jBhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu%&&&.&/&0&J&K&L&N&O&P&Q&R&S&o&p&q&r&u&v&&&&&&&&&&&&&&&ٺ˱ˡ⡖ٖn˱ˡ⡖ٖ jEhfSUmHnHu,jVEhfS>*B*UmHnHphuhfS0JmHnHujhfS0JUmHnHuhYR+mHnHu jDhfSUmHnHujhfSUmHnHuhfSmHnHuhfS56OJQJmHnHuhfS0J@mHnHu"&&&&&&&&'''' ' ' ' ' ')'*'+','1'2'G'H'I'c'd'e'g'h'i'j'k'l''̶̿ٿكlك[ٿك jGhfSUmHnHu,jJGhfS>*B*UmHnHphuhfS0JmHnHuhYR+mHnHu jFhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0J@mHnHujhfS0JUmHnHu,jPFhfS>*B*UmHnHphu#'''''''''''''''''''''''''''(((((((((xkkZ jIhfSUmHnHuhfS0J@mHnHu,j>IhfS>*B*UmHnHphuhYR+mHnHu jHhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHujhfS0JUmHnHu,jDHhfS>*B*UmHnHphuhfS0JmHnHu#(9(:(;(<(?(@(^(_(`(z({(|(~((((((((((((((((ķħh[hfSOJQJmHnHu,j2KhfS>*B*UmHnHphuhYR+mHnHu 
jJhfSUmHnHujhfSUmHnHuhfS56OJQJmHnHuhfS0J@mHnHujhfS0JUmHnHu,j8JhfS>*B*UmHnHphuhfS0JmHnHuhfSmHnHu((((((((((((())!)")#)=)>)?)A)B)C)D)E)F)b)c)d)e)j)k)~))))ȻȰȰȻȰhȰ,j&MhfS>*B*UmHnHphu jLhfSUmHnHu,j,LhfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfSOJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu jKhfSUmHnHu$))))))))))))))))))))))***** *!*"*#*(*)*7*8*9*S*ȻȰȰȻȰhȰ,jOhfS>*B*UmHnHphu jNhfSUmHnHu,j NhfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfSOJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu jMhfSUmHnHu$S*T*U*W*X*Y*Z*[*\*x*y*z*{************************ȻȰȃrȻȰ[ȃ,jQhfS>*B*UmHnHphu jPhfSUmHnHuhfS0J@mHnHu,jPhfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfSOJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu jOhfSUmHnHu#*+++++++++5+6+7+8+=+>+M+N+O+i+j+k+m+n+o+p+q+r++++++׾q׾Z,jShfS>*B*UmHnHphu jRhfSUmHnHuhfS0J@mHnHu,jRhfS>*B*UmHnHphuhfS0JmHnHuhfSOJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu jQhfSUmHnHuhfSmHnHu ++++++++++++++++++,,$,%,&,@,A,B,D,E,F,G,H,I,e,f,g,ٺ˱ˡٖn˱ˡٖW,jThfS>*B*UmHnHphu jyThfSUmHnHu,jShfS>*B*UmHnHphuhfS0JmHnHujhfS0JUmHnHuhYR+mHnHu jShfSUmHnHujhfSUmHnHuhfSmHnHuhfS0J@mHnHuhfS56OJQJmHnHu"p++G,,---f..5///f004111V22%333R445v55 @ x X Xg,h,j,k,,,,,,,,,,,,,,,,,,,,,, - --------2-3-˽ˬ|˽k| jmVhfSUmHnHuhfS56OJQJmHnHu,jUhfS>*B*UmHnHphuhYR+mHnHu jsUhfSUmHnHujhfSUmHnHuhfSmHnHu hfS5CJOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu#3-4-5-:-;-j-k-l----------------------------θθyjθYj jaXhfSUmHnHuhfSCJOJQJmHnHu,jWhfS>*B*UmHnHphuhYR+mHnHu jgWhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jVhfS>*B*UmHnHphu"-.... 
.!.B.C.D.^._.`.c.d.e.f.g.h.................ķxiXi jUZhfSUmHnHuhfSCJOJQJmHnHu,jYhfS>*B*UmHnHphuhYR+mHnHu j[YhfSUmHnHujhfSUmHnHuhfSOJQJmHnHujhfS0JUmHnHu,jXhfS>*B*UmHnHphuhfS0JmHnHuhfSmHnHu".........///-/.///2/3/4/5/6/7/S/T/U/V/]/^/s/t/u//////ܪܙyjܪY jI\hfSUmHnHuhfSCJOJQJmHnHu,j[hfS>*B*UmHnHphuhYR+mHnHu jO[hfSUmHnHujhfSUmHnHuhfSOJQJmHnHu,jZhfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHujhfS0JUmHnHu"///////////////////////0000%0&0B0C0D0^0_0`0c0d0e0f0g0h0ֶͨ͗wͨf j=^hfSUmHnHu,j]hfS>*B*UmHnHphuhYR+mHnHu jC]hfSUmHnHujhfSUmHnHu,j\hfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfSCJOJQJmHnHujhfS0JUmHnHu(h0000000000000000000000000111,1-1.1112131415161R1S1ķxgķ j1`hfSUmHnHu,j_hfS>*B*UmHnHphuhYR+mHnHu j7_hfSUmHnHujhfSUmHnHuhfSOJQJmHnHujhfS0JUmHnHu,j^hfS>*B*UmHnHphuhfS0JmHnHuhfSmHnHu&S1T1U1Z1[1w1x1y111111111111111111111111111122θθyθhθ j%bhfSUmHnHu,jahfS>*B*UmHnHphuhYR+mHnHu j+ahfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,j`hfS>*B*UmHnHphu$22222223242N2O2P2S2T2U2V2W2X2t2u2v2w2|2}2222222222222ξεپεviεXi jdhfSUmHnHuhfSOJQJmHnHu,jchfS>*B*UmHnHphuhYR+mHnHu jchfSUmHnHujhfSUmHnHuhfSmHnHuhfS56OJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jbhfS>*B*UmHnHphu"2222222333333"3#3$3%3&3'3C3D3E3F3K3L3h3i3j333333333333ķxgķ j fhfSUmHnHu,jehfS>*B*UmHnHphuhYR+mHnHu jehfSUmHnHujhfSUmHnHuhfSOJQJmHnHujhfS0JUmHnHu,jdhfS>*B*UmHnHphuhfS0JmHnHuhfSmHnHu&33333333333333333444444.4/404J4K4L4O4P4Q4R4S4T4p4q4οζٿζwοζfٿζ jhhfSUmHnHu,jghfS>*B*UmHnHphuhYR+mHnHu jghfSUmHnHujhfSUmHnHuhfSmHnHuhfSCJOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jfhfS>*B*UmHnHphu$q4r4s4x4y4444444444444444444444 5 55555555θθyjθYj jihfSUmHnHuhfSCJOJQJmHnHu,jxihfS>*B*UmHnHphuhYR+mHnHu jhhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,j~hhfS>*B*UmHnHphu"525354555<5=5R5S5T5n5o5p5s5t5u5v5w5x55555555555555555ĵviXi jkhfSUmHnHuhfSOJQJmHnHu,jlkhfS>*B*UmHnHphuhYR+mHnHu jjhfSUmHnHujhfSUmHnHuhfSCJOJQJmHnHujhfS0JUmHnHu,jrjhfS>*B*UmHnHphuhfS0JmHnHuhfSmHnHu"555 66666696:6;6U6V6W6Z6[6\6]6^6_6{6|6}6~66666666666666666ܪܙyܪh jmhfSUmHnHu,j`mhfS>*B*UmHnHphuhYR+mHnHu 
jlhfSUmHnHujhfSUmHnHuhfSOJQJmHnHu,jflhfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHujhfS0JUmHnHu(5]66H778n88799:b::!;;;J<<&==>~>>*B*UmHnHphuhYR+mHnHu jnhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jZnhfS>*B*UmHnHphu"777777777788 8 8 88888-8.8/8085868J8K8L8f8g8h8k8l8m8n8o8p888ķxgķ jqhfSUmHnHu,jHqhfS>*B*UmHnHphuhYR+mHnHu jphfSUmHnHujhfSUmHnHuhfSOJQJmHnHujhfS0JUmHnHu,jNphfS>*B*UmHnHphuhfS0JmHnHuhfSmHnHu&88888888888888888888888999/90919495969798999U9V9θθyθhθ jshfSUmHnHu,j*B*UmHnHphuhYR+mHnHu jrhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jBrhfS>*B*UmHnHphu$V9W9X9]9^9x9y9z9999999999999999999999999:::::θθyθhθ juhfSUmHnHu,j0uhfS>*B*UmHnHphuhYR+mHnHu jthfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,j6thfS>*B*UmHnHphu$: :!:&:':>:?:@:Z:[:\:_:`:a:b:c:d:::::::::::::::::::::θθyθhθ jwhfSUmHnHu,j$whfS>*B*UmHnHphuhYR+mHnHu jvhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,j*vhfS>*B*UmHnHphu$::::::::;;;;; ;!;";#;?;@;A;B;H;I;b;c;d;~;;;;;;;;;;;θθyθhθ jyhfSUmHnHu,jyhfS>*B*UmHnHphuhYR+mHnHu jxhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jxhfS>*B*UmHnHphu$;;;;;;;;;;;;;;;;;<<<< < <&<'<(<B<C<D<G<H<I<J<K<L<h<i<οζٿζwοζfٿζ j{hfSUmHnHu,j {hfS>*B*UmHnHphuhYR+mHnHu jzhfSUmHnHujhfSUmHnHuhfSmHnHuhfSCJOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jzhfS>*B*UmHnHphu$i<j<k<q<r<<<<<<<<<<<<<<<<<<<===== =#=$=%=&='=(=D=E=θθyθhθ j}}hfSUmHnHu,j}hfS>*B*UmHnHphuhYR+mHnHu j|hfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,j|hfS>*B*UmHnHphu$E=F=G=M=N=i=j=k====================>>>>>>> >%>&>θθyθhθ jqhfSUmHnHu,j~hfS>*B*UmHnHphuhYR+mHnHu jw~hfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,j}hfS>*B*UmHnHphu$&>'>(>.>/>Z>[>\>v>w>x>{>|>}>~>>>>>>>>>>>>>>>>>>>>>??θθyθhθ jehfSUmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu jkhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jhfS>*B*UmHnHphu$??? ? 
????4?5?6?9?:?;??Z?[?\?]?b?c?v?w?x??????????ξεپεviεXi jYhfSUmHnHuhfSOJQJmHnHu,j܂hfS>*B*UmHnHphuhYR+mHnHu j_hfSUmHnHujhfSUmHnHuhfSmHnHuhfS56OJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jhfS>*B*UmHnHphu"???????????????????@@@@@@5@6@7@Q@R@S@V@W@X@Y@Z@[@w@x@ķxgķ jMhfSUmHnHu,jЄhfS>*B*UmHnHphuhYR+mHnHu jShfSUmHnHujhfSUmHnHuhfSOJQJmHnHujhfS0JUmHnHu,jփhfS>*B*UmHnHphuhfS0JmHnHuhfSmHnHu&x@y@z@@@@@@@@@@@@@@@@@@@@@ A AA(A)A*A-A.A/A0A1A2ANAOAθθyθhθ jAhfSUmHnHu,jĆhfS>*B*UmHnHphuhYR+mHnHu jGhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jʅhfS>*B*UmHnHphu$@0AAB~BBTCC-DDDTEE*FFF9GGGMHHItII9JJK X @ X xOAPAQAVAWAAAAAAAAAAAAAAAAAAAAAABBBBBBB B!B=B>Bθθyθhθ j5hfSUmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu j;hfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jhfS>*B*UmHnHphu$>B?B@BEBFBZB[B\BvBwBxB{B|B}B~BBBBBBBBBBBBBBBBBBBBBBBθθyθhθ j)hfSUmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu j/hfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jhfS>*B*UmHnHphu$BBBCC0C1C2CLCMCNCQCRCSCTCUCVCrCsCtCuC{C|CCCCCCCCCCCCCCCθθyθhθ jhfSUmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu j#hfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jhfS>*B*UmHnHphu$CCCCC D D D%D&D'D*D+D,D-D.D/DKDLDMDNDQDRD_D`DaD{D|D}DDDDDDDθθyiθXi jhfSUmHnHuhfS56OJQJmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jhfS>*B*UmHnHphu"DDDDDDDDDDDDDDDDDDDDDDDEE0E1E2ELEMENEQERESETEUEVErEsEķxgķ jhfSUmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu j hfSUmHnHujhfSUmHnHuhfSOJQJmHnHujhfS0JUmHnHu,jhfS>*B*UmHnHphuhfS0JmHnHuhfSmHnHu&sEtEuEzE{EEEEEEEEEEEEEEEEEEEFFF"F#F$F'F(F)F*F+F,FθθyjθYj jhfSUmHnHuhfSCJOJQJmHnHu,j|hfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jhfS>*B*UmHnHphu",FHFIFJFKFRFSFfFgFhFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFĵveĵ jhfSUmHnHu,jphfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSCJOJQJmHnHujhfS0JUmHnHu,jvhfS>*B*UmHnHphuhfS0JmHnHuhfSmHnHu&FFFGGGGG1G2G3G6G7G8G9G:G;GWGXGYGZGaGbGkGlGmGGGGGGGGGGGGοζٿζwοζfٿζ jhfSUmHnHu,jdhfS>*B*UmHnHphuhYR+mHnHu 
jhfSUmHnHujhfSUmHnHuhfSmHnHuhfSCJOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jjhfS>*B*UmHnHphu$GGGGGGGGGGGGGGGGG H H HHHH)H*H+HEHFHGHJHKHLHMHNHOHkHlHθθyθhθ j՘hfSUmHnHu,jXhfS>*B*UmHnHphuhYR+mHnHu jۗhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,j^hfS>*B*UmHnHphu$lHmHnHsHtHHHHHHHHHHHHHHHHHHHHHH I I IIIIIII1I2Iθθyθhθ jɚhfSUmHnHu,jLhfS>*B*UmHnHphuhYR+mHnHu jϙhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jRhfS>*B*UmHnHphu$2I3I4I7I8IPIQIRIlImInIqIrIsItIuIvIIIIIIIIIIIIIIIIIξεپεveεTe jhfSUmHnHu hfS5CJOJQJmHnHu,j@hfS>*B*UmHnHphuhYR+mHnHu jÛhfSUmHnHujhfSUmHnHuhfSmHnHuhfS56OJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jFhfS>*B*UmHnHphu IIIIIIIIIJJJ1J2J3J6J7J8J9J:J;JWJXJYJZJ]J^JJJJJJJJJJJJJJܧܖvܧe jhfSUmHnHu,j4hfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfS56OJQJmHnHu,j:hfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHujhfS0JUmHnHu'JJJJJJJJJJJJKKKKKK!K"K#K$K'K(K1K2K3KMKNKOKRKSKTKUKVKWKsKtKͽudͽ jhfSUmHnHu,j(hfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHuhfS56OJQJmHnHujhfS0JUmHnHu,j.hfS>*B*UmHnHphuhfS0JmHnHu%KUKKLkLLMqMM$NvNN'OO PPPoQQ1RR SvSS%TTT @   x XtKuKvK{K|KKKKKKKKKKKKKKKKKKKKKKLLLLLLLLL4L5Lθθyθhθ jhfSUmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,j"hfS>*B*UmHnHphu$5L6L7L*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jhfS>*B*UmHnHphu$LLLLLLLLMMMMMMMMM6M7M8M9M>M?MMMNMOMiMjMkMnMoMpMqMrMsMMMθθyθhθ jhfSUmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,j hfS>*B*UmHnHphu$MMMMMMMMMMMMMMMMMMMMMMMNNNNNN!N"N#N$N%N&NBNCNθθyθhθ juhfSUmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu j{hfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jhfS>*B*UmHnHphu$CNDNENJNKNRNSNTNnNoNpNsNtNuNvNwNxNNNNNNNNNNNNNNNNNNNNNθθyθhθ jihfSUmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu johfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jhfS>*B*UmHnHphu$NNNNNOOOO O!O$O%O&O'O(O)OEOFOGOHOKOLO|O}O~OOOOOOOOOOθθyiθXi j]hfSUmHnHuhfS56OJQJmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu 
jchfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jhfS>*B*UmHnHphu"OOOOOOOOOOPPPP P P P P P)P*P+P,P/P0PcPdPePPPPPPPPPPPPĴudĴ jQhfSUmHnHu,jԭhfS>*B*UmHnHphuhYR+mHnHu jWhfSUmHnHujhfSUmHnHuhfS56OJQJmHnHujhfS0JUmHnHu,jڬhfS>*B*UmHnHphuhfS0JmHnHuhfSmHnHu&PPPPPPPPPPPPPPPPPQQQQ Q!QKQLQMQgQhQiQlQmQnQoQpQqQξεپεviεXi jEhfSUmHnHuhfSOJQJmHnHu,jȯhfS>*B*UmHnHphuhYR+mHnHu jKhfSUmHnHujhfSUmHnHuhfSmHnHuhfS56OJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jήhfS>*B*UmHnHphu"qQQQQQQQQQQQQQQQQQQQQQQQQQ RRR)R*R+R.R/R0R1RķxiXi j9hfSUmHnHuhfSCJOJQJmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu j?hfSUmHnHujhfSUmHnHuhfSOJQJmHnHujhfS0JUmHnHu,j°hfS>*B*UmHnHphuhfS0JmHnHuhfSmHnHu"1R2R3RORPRQRRRYRZRyRzR{RRRRRRRRRRRRRRRRRRRSSS S S S S SS*S+Sܨܗwܨf j-hfSUmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu j3hfSUmHnHujhfSUmHnHuhfSCJOJQJmHnHu,jhfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHujhfS0JUmHnHu(+S,S-S0S1SRSSSTSnSoSpSsStSuSvSwSxSSSSSSSSSSSSSSSSSSSξεپεviεXi j!hfSUmHnHuhfSOJQJmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu j'hfSUmHnHujhfSUmHnHuhfSmHnHuhfS56OJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jhfS>*B*UmHnHphu"SSSSSSSTTTTTT"T#T$T%T&T'TCTDTETFTKTLT]T^T_TyTzT{T~TTTTTTTTķxgķ jhfSUmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSOJQJmHnHujhfS0JUmHnHu,jhfS>*B*UmHnHphuhfS0JmHnHuhfSmHnHu&TTTTTTTTTTTTTTTTTTTTTUUUU U:U;U*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jhfS>*B*UmHnHphu$TBUU VqVVYWW$XXXYYY'ZZZY[[\\\l]]H^^0__ @   xaUbUcUhUiUUUUUUUUUUUUUUUUUUUUUUVVVV V V V V VθθyiθXi jhfSUmHnHuhfS56OJQJmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jhfS>*B*UmHnHphu" V)V*V+V,V2V3VMVNVOViVjVkVnVoVpVqVrVsVVVVVVVVVVVVVVVVVVVWWķxgķ jhfSUmHnHu,jthfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSOJQJmHnHujhfS0JUmHnHu,jzhfS>*B*UmHnHphuhfS0JmHnHuhfSmHnHu&WWWWW5W6W7WQWRWSWVWWWXWYWZW[WwWxWyWzWWWWWWWWWWWWWWWWWθθyθhθ jhfSUmHnHu,jhhfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jnhfS>*B*UmHnHphu$WWWWWXXXXXX!X"X#X$X%X&XBXCXDXEXIXJXdXeXfXXXXXXXXXXXXξεپεvξεeپε 
jhfSUmHnHu,j\hfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHuhfS56OJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jbhfS>*B*UmHnHphu$XXXXXXXXXXXXXXXXXYYYYYY5Y6Y7YQYRYSYVYWYXYYYZY[YwYxYθθyθhθ jhfSUmHnHu,jPhfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jVhfS>*B*UmHnHphu$xYyYzYYYYYYYYYYYYYYYYYYYYYZZZZ Z!Z$Z%Z&Z'Z(Z)ZEZFZθθyθhθ jhfSUmHnHu,jDhfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jJhfS>*B*UmHnHphu$FZGZHZNZOZtZuZvZZZZZZZZZZZZZZZZZZZZZZZZZZZZθθyiθXi jhfSUmHnHuhfS56OJQJmHnHu,j8hfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHuhfSOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,j>hfS>*B*UmHnHphu"Z[[[[ [![5[6[7[Q[R[S[V[W[X[Y[Z[[[w[x[y[z[[[[[[[[[[[[[[[[[ķxgķ jhfSUmHnHu,j,hfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSOJQJmHnHujhfS0JUmHnHu,j2hfS>*B*UmHnHphuhfS0JmHnHuhfSmHnHu&[[[[[[[[\\\\\\\\\8\9\:\;\A\B\r\s\t\\\\\\\\\\ξεپεviεXi jhfSUmHnHuhfSOJQJmHnHu,j hfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHuhfS56OJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,j&hfS>*B*UmHnHphu"\\\\\\\\\\\\\\\\\\\]]]]]]H]I]J]d]e]f]i]j]k]l]m]n]]]ķxgķ jhfSUmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSOJQJmHnHujhfS0JUmHnHu,jhfS>*B*UmHnHphuhfS0JmHnHuhfSmHnHu&]]]]]]]]]]]]]]]]]]]]]]]$^%^&^@^A^B^E^F^G^H^I^J^f^g^οζٿζwοζfٿζ jhfSUmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHuhfSCJOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jhfS>*B*UmHnHphu$g^h^i^q^r^^^^^^^^^^^^^^^^^^^ _ __(_)_*_-_._/_0_1_οζٿζwgζVg jyhfSUmHnHuhfS56OJQJmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHuhfSCJOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu,jhfS>*B*UmHnHphu!1_2_N_O_P_Q_V_W_i_j_k_______________________땦ĴudS jmhfSUmHnHu hfS5CJOJQJmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu jshfSUmHnHujhfSUmHnHuhfS56OJQJmHnHujhfS0JUmHnHu,jhfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHu ______`` ` `$`%`&`@`A`B`E`F`G`H`I`J`f`g`h`i`n`o`````ƽƦƽ|ƽeXƽhfSOJQJmHnHu,jhfS>*B*UmHnHphuhfS56OJQJmHnHuhYR+mHnHu jghfSUmHnHu,jhfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHu hfS5CJOJQJmHnHujhfS0JUmHnHujhfSUmHnHu__H``:aabbbacc7dddVee+ffflgg7hhiZiii @ x 
X```````````````aaa2a3a4a7a8a9a:a;a*B*UmHnHphu j[hfSUmHnHu,jhfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfSOJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu jahfSUmHnHu"aaaaaaaaaaaaaaaaaaaabbbbbbbbb5b6b7b8b?b@b`babbb|b򿰿饎}򿰿f,jhfS>*B*UmHnHphu jOhfSUmHnHu,jhfS>*B*UmHnHphuhfS0JmHnHuhfSCJOJQJmHnHujhfS0JUmHnHuhYR+mHnHu jUhfSUmHnHuhfSmHnHujhfSUmHnHu&|b}b~bbbbbbbbbbbbbbbbbbbbbbbbbcccccc=c>cȹȮȮ}ȹȮfȮYhfSOJQJmHnHu,jhfS>*B*UmHnHphu jChfSUmHnHu,jhfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfSCJOJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu jIhfSUmHnHu">c?cYcZc[c^c_c`cacbccccccccccccccccccccccccccc򿲿駐򿲿hYhfSCJOJQJmHnHu,jhfS>*B*UmHnHphu j7hfSUmHnHu,jhfS>*B*UmHnHphuhfS0JmHnHuhfSOJQJmHnHujhfS0JUmHnHuhYR+mHnHu j=hfSUmHnHuhfSmHnHujhfSUmHnHu"cddd/d0d1d4d5d6d7d8d9dUdVdWdXdydzd{dddddddddddddddݳ|ݳlU,jhfS>*B*UmHnHphuhfS56OJQJmHnHu j+hfSUmHnHu,jhfS>*B*UmHnHphuhfSCJOJQJmHnHujhfS0JUmHnHuhYR+mHnHu j1hfSUmHnHujhfSUmHnHuhfSmHnHuhfS0JmHnHu!ddddddddddddddeeee#e$e2e3e4eNeOePeSeTeUeVeWeXeteueve޿жЦ珦oжЦX,jhfS>*B*UmHnHphu jhfSUmHnHuhfSCJOJQJmHnHu,jhfS>*B*UmHnHphujhfS0JUmHnHuhYR+mHnHu j%hfSUmHnHujhfSUmHnHuhfSmHnHuhfS0JmHnHuhfSOJQJmHnHu"vewe~eeeeeeeeeeeeeeeeeeeeff f#f$f%f(f)f*f+f,f-fIfJfKfLfSfTfkfͿͮͿ}f,jhfS>*B*UmHnHphu jhfSUmHnHu,jhfS>*B*UmHnHphuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHuhfSCJOJQJmHnHuhfS0JmHnHujhfS0JUmHnHu(kflfmffffffffffffffffffffffffffffggggggHgIgJgdg辯|辯e,jhfS>*B*UmHnHphu jhfSUmHnHu,jhfS>*B*UmHnHphuhfS0JmHnHuhfSCJOJQJmHnHujhfS0JUmHnHuhYR+mHnHu j hfSUmHnHujhfSUmHnHuhfSmHnHu'dgegfgigjgkglgmgnggggggggggggggggggggggggghhh/hȹȮȮ}ȹȮfȮ,jxhfS>*B*UmHnHphu jhfSUmHnHu,j~hfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfSCJOJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu jhfSUmHnHu$/h0h1h4h5h6h7h8h9hUhVhWhXh]h^h}h~hhhhhhhhhhhhhhhhhhhȹȮȮpȁȮYȮ,jlhfS>*B*UmHnHphu jhfSUmHnHuhfSOJQJmHnHu,jrhfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfSCJOJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu jhfSUmHnHu"hhiiiiiii i i&i'i(i)i+i,i6i7i8iRiSiTiWiXiYiZi[i\ixiyizi{iii򿲿駐nW,j`hfS>*B*UmHnHphu jhfSUmHnHu hfS5CJOJQJmHnHu,jfhfS>*B*UmHnHphuhfS0JmHnHuhfSOJQJmHnHujhfS0JUmHnHuhYR+mHnHu 
jhfSUmHnHuhfSmHnHujhfSUmHnHu"iiiiiiiiiiiiiiiiiiiiiiiiijjjjj j*j+jNj򿮿飌{lU,jThfS>*B*UmHnHphuhfSCJOJQJmHnHu jhfSUmHnHu,jZhfS>*B*UmHnHphuhfS0JmHnHu hfS5CJOJQJmHnHujhfS0JUmHnHuhYR+mHnHu jhfSUmHnHuhfSmHnHujhfSUmHnHu!NjOjPjjjkjljojpjqjrjsjtjjjjjjjjjjjjjjjjjkkkk-k辭zjS,jHhfS>*B*UmHnHphuhfS56OJQJmHnHu jhfSUmHnHu,jNhfS>*B*UmHnHphuhfS0JmHnHu hfS5CJOJQJmHnHujhfS0JUmHnHuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHu irjjQkk0lllXmm nynn,oo pppUqqr{rr>sstqt @ x   -k.k/kIkJkKkNkOkPkQkRkSkokpkqkrkkkkkkkkkkkkkkkkk l ll(l辮{辮d,j<hfS>*B*UmHnHphu jhfSUmHnHu,jBhfS>*B*UmHnHphuhfS0JmHnHuhfS56OJQJmHnHujhfS0JUmHnHuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHu#(l)l*l-l.l/l0l1l2lNlOlPlQl[l\lllllllllllllllllllȸȭȭ|k|ȭTȭ,j0hfS>*B*UmHnHphu jhfSUmHnHu hfS5CJOJQJmHnHu,j6hfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfS56OJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu jhfSUmHnHu lllllllllllmmmmm m#m$m4m5m6mPmQmRmUmVmWmXmYmZmvmwmxmym|m}mmmmݳ{ݳd,j$hfS>*B*UmHnHphu jhfSUmHnHu,j*hfS>*B*UmHnHphuhfS56OJQJmHnHujhfS0JUmHnHuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHuhfS0JmHnHu'mmmmmmmmmmmmmmmmmmmnnn n n n n nn*n+n,n-n2n3nUn׾{׾dWhfSOJQJmHnHu,jhfS>*B*UmHnHphu jhfSUmHnHu,jhfS>*B*UmHnHphuhfS0JmHnHuhfS56OJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu jhfSUmHnHuhfSmHnHu"UnVnWnqnrnsnvnwnxnynzn{nnnnnnnnnnnnnnnnnnnnnnnnno o o$o辱~辱g,j hfS>*B*UmHnHphu jhfSUmHnHu,jhfS>*B*UmHnHphuhfS0JmHnHuhfSOJQJmHnHujhfS0JUmHnHuhYR+mHnHu jhfSUmHnHujhfSUmHnHuhfSmHnHu'$o%o&o)o*o+o,o-o.oJoKoLoMoToUoooooooooooooooooooooȻȰȰpȁȰYȰ,jhfS>*B*UmHnHphu jhfSUmHnHuhfSCJOJQJmHnHu,jhfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfSOJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu jhfSUmHnHu"oopppppp p!p"p>p?p@pApHpIp`papbp|p}p~ppppppppppppppppp򿰿饎}򿰿f,jhfS>*B*UmHnHphu jwhfSUmHnHu,jhfS>*B*UmHnHphuhfS0JmHnHuhfSCJOJQJmHnHujhfS0JUmHnHuhYR+mHnHu j}hfSUmHnHuhfSmHnHujhfSUmHnHu&pppppppppqqqq q q1q2q3qMqNqOqRqSqTqUqVqWqsqtquqvq{q|qqqȹȮȮpȁȮYȮ,jhfS>*B*UmHnHphu jkhfSUmHnHuhfSOJQJmHnHu,jhfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfSCJOJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu jqhfSUmHnHu"qqqqqqqqqqqqqqqqqqqq r r 
rrrrrrr1r2r3r4r9r:rWrXrYrsr򿲿駐򿲿h,jhfS>*B*UmHnHphu j_hfSUmHnHu,jhfS>*B*UmHnHphuhfS0JmHnHuhfSOJQJmHnHujhfS0JUmHnHuhYR+mHnHu jehfSUmHnHuhfSmHnHujhfSUmHnHu&srtrurxryrzr{r|r}rrrrrrrrrrrrrrrrrrrrrrrrrssȻȰȰȻȰhȰW hfS5CJOJQJmHnHu,jhfS>*B*UmHnHphu jShfSUmHnHu,jhfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfSOJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu jYhfSUmHnHu"ss6s7s8s;ss?s@s\s]s^s_sbscsssssssssssssssss򿮿飌|k|T,jhfS>*B*UmHnHphu jGhfSUmHnHuhfS56OJQJmHnHu,jhfS>*B*UmHnHphuhfS0JmHnHu hfS5CJOJQJmHnHujhfS0JUmHnHuhYR+mHnHu jMhfSUmHnHuhfSmHnHujhfSUmHnHu ssssss t t ttttttt0t1t2t3t6t7tMtNtOtitjtktntotptqtrtsttttttttۼͳͣ{ͳͣd,jhfS>*B*UmHnHphu j;hfSUmHnHu,jhfS>*B*UmHnHphujhfS0JUmHnHuhYR+mHnHu jAhfSUmHnHujhfSUmHnHuhfSmHnHuhfS56OJQJmHnHuhfS0JmHnHu'ttttttttttttttttttuuuuuu"u#u$u%u&u'uCuDuEuFuIuJu[u\u]uwu辮{辮d,jhfS>*B*UmHnHphu j/hfSUmHnHu,jhfS>*B*UmHnHphuhfS0JmHnHuhfS56OJQJmHnHujhfS0JUmHnHuhYR+mHnHu j5hfSUmHnHujhfSUmHnHuhfSmHnHu'qtt%uuu>vvvEww-xxyyzz {{{V||3}} ~r~~?  wuxuyu|u}u~uuuuuuuuuuuuuuuuuuuuuuuuuvvvvvv6vȸȭȭ|ȸȭeȭ,j hfS>*B*UmHnHphu j# hfSUmHnHu,jhfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfS56OJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu j)hfSUmHnHu$6v7v8v;vv?v@v\v]v^v_vbvcv}v~vvvvvvvvvvvvvvvvvvvvvȸȭȭ|ȸȭeȭ,j hfS>*B*UmHnHphu j hfSUmHnHu,j hfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfS56OJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu j hfSUmHnHu$vvvvvvvvvwwww!w"w#w=w>w?wBwCwDwEwFwGwcwdwewfwiwjwwwȸȭȭ|kȭTȭ,j hfS>*B*UmHnHphu hfS5CJOJQJmHnHu j hfSUmHnHu,j hfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfS56OJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu j hfSUmHnHu wwwwwwwwwwwwwwwww x x x%x&x'x*x+x,x-x.x/xKxLxMxNxQxRxmxnxoxx򿯿餍|򿯿e,j|hfS>*B*UmHnHphu jhfSUmHnHu,jhfS>*B*UmHnHphuhfS0JmHnHuhfS56OJQJmHnHujhfS0JUmHnHuhYR+mHnHu jhfSUmHnHuhfSmHnHujhfSUmHnHu&xxxxxxxxxxxxxxxxxxyyyyyyyyy9y:y;y*B*UmHnHphu jhfSUmHnHu,jvhfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfS56OJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu jhfSUmHnHu$yyyyyyyyyyyyyyyyyy z z zzzzzzz0z1z2z3z7z8zizjzkzzȸȭȭ|ȸȭeȭ,jdhfS>*B*UmHnHphu jhfSUmHnHu,jjhfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfS56OJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu 
jhfSUmHnHu$zzzzzzzzzzzzzzzzzz{{{{ { { { { {){*{+{,{0{1{e{f{g{{ȸȭȭ|ȸȭeȭ,jXhfS>*B*UmHnHphu jhfSUmHnHu,j^hfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfS56OJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu jhfSUmHnHu${{{{{{{{{{{{{{{{{{{{{{{{{{{ | | ||||2|3|4|N|ȸȭȭ|ȸȭeȭ,jLhfS>*B*UmHnHphu jhfSUmHnHu,jRhfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfS56OJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu jhfSUmHnHu$N|O|P|S|T|U|V|W|X|t|u|v|w|||||||||||||||||}}}+}ȸȭȭ|ȸȭeȭ,j@hfS>*B*UmHnHphu jhfSUmHnHu,jFhfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfS56OJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu jhfSUmHnHu +},}-}0}1}2}3}4}5}Q}R}S}T}}}~}}}}}}}}}}}}}}}}}}~ȸȭȭ|ȸȭeȭ,j4hfS>*B*UmHnHphu jhfSUmHnHu,j:hfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfS56OJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu jhfSUmHnHu ~~~~~ ~ ~ ~ ~(~)~*~+~N~O~P~j~k~l~o~p~q~r~s~t~~~~~~~~~ȸȭȭ|ȸȭeȭ,j(hfS>*B*UmHnHphu jhfSUmHnHu,j.hfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfS56OJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu jhfSUmHnHu ~~~~~~~~~~~~~789<=>?@A]^_`ȸȭȭ|ȸȭeȭ,jhfS>*B*UmHnHphu jhfSUmHnHu,j"hfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfS56OJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu jhfSUmHnHu 9:;=>?ȸzqzZzqqI j hfSUmHnHu,j hfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHujhfS5CJ U hfS5CJ hfShfSB*CJOJQJphjhfSCJOJQJUhfS56OJQJmHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu jhfSUmHnHu@k |dÅA  x^` x$xa$$xa$?@AB^_`aɀʀˀ̀45ںѬћ{Ѭj j"hfSUmHnHu,j "hfS>*B*UmHnHphuhYR+mHnHu j!hfSUmHnHujhfSUmHnHu,j!hfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHujhfS0JUmHnHuhfS5mHnHu%567jkl݁ށ߁ ŷŦْ{ŷjْ j{$hfSUmHnHu,j#hfS>*B*UmHnHphuhfS5mHnHuhYR+mHnHu j#hfSUmHnHujhfSUmHnHuhfSmHnHuhfS0JmHnHujhfS0JUmHnHu,j#hfS>*B*UmHnHphu !"GHIcdehijklm؂قŷŦْ{ŷjْ jo&hfSUmHnHu,j%hfS>*B*UmHnHphuhfS5mHnHuhYR+mHnHu ju%hfSUmHnHujhfSUmHnHuhfSmHnHuhfS0JmHnHujhfS0JUmHnHu,j$hfS>*B*UmHnHphu قڂۂ '()*XYZtuvyz{|}~ŷŦْ{ŷjْ jc(hfSUmHnHu,j'hfS>*B*UmHnHphuhfS5mHnHuhYR+mHnHu ji'hfSUmHnHujhfSUmHnHuhfSmHnHuhfS0JmHnHujhfS0JUmHnHu,j&hfS>*B*UmHnHphu ۃ܃݃0123cdeŷŦْtoeoeNŷ,j)hfS>*B*UmHnHphujhfS:U hfS:hfS5CJ OJQJ hfS5CJ jhfS5CJ UhfS5mHnHuhYR+mHnHu j])hfSUmHnHujhfSUmHnHuhfSmHnHuhfS0JmHnHujhfS0JUmHnHu,j(hfS>*B*UmHnHphuʄ˄̄ ABC]^_ȽȲȲᩁȽȲjȲY 
jK,hfSUmHnHu,j+hfS>*B*UmHnHphu jQ+hfSUmHnHu,j*hfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfS5mHnHujhfS0JUmHnHuhYR+mHnHujhfSUmHnHu jW*hfSUmHnHu"_abcdef…ÅąŅ :;<>?@ABC_`¹¢¹蹑¹z¹i¹ j?.hfSUmHnHu,j-hfS>*B*UmHnHphu jE-hfSUmHnHu,j,hfS>*B*UmHnHphuhfSmHnHuhfS0JmHnHuhfS5mHnHujhfS0JUmHnHujhfSUmHnHuhYR+mHnHu(`abʆˆ̆͆:;ŷŦْ{ŷjْ j30hfSUmHnHu,j/hfS>*B*UmHnHphuhfS5mHnHuhYR+mHnHu j9/hfSUmHnHujhfSUmHnHuhfSmHnHuhfS0JmHnHujhfS0JUmHnHu,j.hfS>*B*UmHnHphu ;<=abc}~·χЇŷŦْ{ŷjْ j'2hfSUmHnHu,j1hfS>*B*UmHnHphuhfS5mHnHuhYR+mHnHu j-1hfSUmHnHujhfSUmHnHuhfSmHnHuhfS0JmHnHujhfS0JUmHnHu,j0hfS>*B*UmHnHphu bˈ<yYċ<nxIאJXޘɛ՛cx  x^` ?@A[\]_`abcdĈňƈȈɈʈˈ͈̈ŷŦْ{ŷjْ j4hfSUmHnHu,j3hfS>*B*UmHnHphuhfS5mHnHuhYR+mHnHu j!3hfSUmHnHujhfSUmHnHuhfSmHnHuhfS0JmHnHujhfS0JUmHnHu,j2hfS>*B*UmHnHphu 5679:;<=>Z[\]ƉljŷŦْ{ŷjْ j6hfSUmHnHu,j5hfS>*B*UmHnHphuhfS5mHnHuhYR+mHnHu j5hfSUmHnHujhfSUmHnHuhfSmHnHuhfS0JmHnHujhfS0JUmHnHu,j4hfS>*B*UmHnHphu ljȉɉ 2345VWXrstvwxyz{ŷŦْ{ŷjْ j8hfSUmHnHu,j7hfS>*B*UmHnHphuhfS5mHnHuhYR+mHnHu j 7hfSUmHnHujhfSUmHnHuhfSmHnHuhfS0JmHnHujhfS0JUmHnHu,j6hfS>*B*UmHnHphu ЊъҊ678RSTVWXYZ[wxŷŦْ{ŷjْ j9hfSUmHnHu,jz9hfS>*B*UmHnHphuhfS5mHnHuhYR+mHnHu j8hfSUmHnHujhfSUmHnHuhfSmHnHuhfS0JmHnHujhfS0JUmHnHu,j8hfS>*B*UmHnHphu xyz‹ËċŋƋ5679:;<=>Z[ŷŦْ{ŷjْ j;hfSUmHnHu,jn;hfS>*B*UmHnHphuhfS5mHnHuhYR+mHnHu j:hfSUmHnHujhfSUmHnHuhfSmHnHuhfS0JmHnHujhfS0JUmHnHu,jt:hfS>*B*UmHnHphu [\]ޘƛǛԛ՛23]^_ŷŦْwodo^oYoNoj=hfSU hfS6 hfS0Jjb=hfSUjhfSUhfSB*phhfS hfS:jhfS:UhfS5mHnHuhYR+mHnHu j<hfSUmHnHujhfSUmHnHuhfSmHnHuhfS0JmHnHujhfS0JUmHnHu,jh<hfS>*B*UmHnHphu_hiĞ*#YZz{|-./89bcvwʫ˫jAhfSUj!AhfSUj@hfSUj@hfSUjz?hfSUj>hfSUjr>hfSU hfS6 hfS5hfSjhfSU hfS0J;cm7L|آbv[xɯ & Fx7$8$H$ & F7x ŮƮǮ34:;XYZtuxy°WXhfS5OJQJ hfS0J6jCEhfS6U hfS6jhfS6UjDhfSUjQDhfSUjChfSUjIChfSUjBhfSU hfS0JjhfSUjQBhfSUhfS2&.@HWXOkdE$$IfTl4\:,"04 laT $$Ifa$$ $Ifa$XlݱޱI{ZkdOF$$IfTl\:,"04 laT x$If Xݱޱ{|:;mw!";<=lm(DdYZds EhfS5OJQJhfS56CJ hfS0JjKhfSUjhfSUhfSB*ph hfS5hfSB*ph hfS0J6jJhfS6U hfS6jhfS6U hfS>*hfShfSOJQJ7{|hZZZZ x$IfkdF$$IfTl\:,"04 laT1_hZZZZ x$IfkdG$$IfTl\:,"04 laTʳ:hZZZZ x$Ifkd3H$$IfTl\:,"04 
laT:;S|hZZZZ x$IfkdH$$IfTl\:,"04 laTɴ)SmhZZZZZZ x$IfkdcI$$IfTl\:,"04 laTmnopqrh^\\\ZVVVx $ & Fxa$kdI$$IfTl\:,"04 laT uoX=d1@Q@ & Fx & Fx & F & F?x@Gk~$ & F x$Ifa$ $$ & Fxa$ & F & Fx ExddS & F x$If$ & F x$Ifa$kdK$$IfTl4      F $N0          4 laf4TEFOXxddS & F x$If$ & F x$Ifa$kdYL$$IfTl4      F $N0          4 laf4TEF jk67)*euzhfS5CJOJQJhfS56CJOJQJhfS56CJjCShfS6U hfS0JjRhfSUjhfSU hfS0J6j+RhfS6UjhfS6UjhfSCJUmHnHu hfS6hfSOJQJhfS/ xddS & F x$If$ & F x$Ifa$kd M$$IfTl4      F $N0          4 laf4T xddS & F x$If$ & F x$Ifa$kdM$$IfTl4      F $N0          4 laf4TjxddS & F x$If$ & F x$Ifa$kdSN$$IfTl4      F $N0          4 laf4Tjkt}xddS & F x$If$ & F x$Ifa$kdN$$IfTl4      F $N0          4 laf4T6xddS & F x$If$ & F x$Ifa$kdO$$IfTl4      F $N0          4 laf4T67@IxddS & F x$If$ & F x$Ifa$kd?P$$IfTl4      F $N0          4 laf4TxddS & F x$If$ & F x$Ifa$kdP$$IfTl4      F $N0          4 laf4T'\Lxvvooojoo_  & F8<^8 & F & FxkdQ$$IfTl4      F $N0          4 laf4T Gx 78Z H$mx^`ma$x  & F8<^8Hs)$,  e *$If$xa$xx & Fx*V`uvwx'(78IJQR$IfFfU$Ifuvwx2 n$_%`%a%w%y%&&%&'&&'''''U(V(^(_(s(t((()))))))yjchfS6U hfS6jhfS6UhfSCJOJQJhfS5CJOJQJjhfSUmHnHu hfSCJhfShfS6CJOJQJhfS56CJhfSOJQJmH nH uhfSCJOJQJ hfSCJ hfS5CJ hfSH*,RX\]gh-.KL]~$If$%23<=MN_/34XY<$If$IfY|O2A]7 & F 8xxxxFf[$IfFf\X$If7[  j""#o$$%_%a%n%w% $x$Ifa$$xa$  & F vxx  x & Fx w%x%y%%&zodd $$Ifa$ $x$Ifa$kd]$$Ifs4(F }$x x x 0    4 saf4&&&&%&zood $$Ifa$ $x$Ifa$kd^$$Ifs4;F }$ x x x 0    4 saf4%&&&'&I&&zood $$Ifa$ $x$Ifa$kdW_$$Ifs4XF }$ x x x 0    4 saf4&&&''''|unccc $x$Ifa$$x1$$xa$kd`$$Ifs4F }$ x x x 0    4 saf4'''((U(|qqqq $x$Ifa$kd`$$Ifs4F }$`x x x 0    4 saf4U(V(_(h(s(|qqq $x$Ifa$kda$$Ifs4F }$ x x x 0    4 saf4s(t((())|qqq $x$Ifa$kd=b$$Ifs4F }$x x x 0    4 saf4))*)+))0*@*Y*|uum\K$  @ x$Ifa$$  @ x$Ifa$$$xa$$x1$kdb$$Ifs4F }$x x x 0    4 saf4))))0*Y*Z**********P-Q-v-w-x---..S.T.q.r.s.y.z.{.............+/0.0113ƹӤꇁ hfSCJ hfS5CJ hfSCJ hfS>*hfS>*CJOJQJhfSCJOJQJhfS5CJOJQJjlhfS6U hfS6hfSOJQJ hfS5hfS5OJQJhfS56CJhfSjhfS6U hfS0J62Y*Z*[*b*k*t*}**$  @ x$Ifa$$  @ 
x$Ifa$Lkdd$$IfTl40+" 4 laf4T********dSBBBBB$  @ x$Ifa$$  @ x$Ifa$kdd$$IfTl4ֈ +")     ' F4 laf4T** kdf$$IfTl4ֈ +")   ' F <4 laf4p<T*******$  @ x$Ifa$$  @ x$Ifa$***+$  @ x$Ifa$kd h$$IfTl4ֈ +") 'F 24 laf4p2T******$  @ x$Ifa$***6%$  @ x$Ifa$kdi$$IfTl4ֈ +") 'F (4 laf4p(T******$  @ x$Ifa$**4+..A51*$a$x $  @ xa$kdDk$$IfTl4ֈ +") 'F 4 laf4pT...S.T.V.r.pkdMm$$IfTl0<404 laT $x$Ifa$r.s.u.z. $x$Ifa$pkdm$$IfTl0<404 laTz.{.}.. $x$Ifa$pkdn$$IfTl0<404 laT.... $x$Ifa$pkd/o$$IfTl0<404 laT.... $x$Ifa$pkdo$$IfTl0<404 laT..+/^/0.0113344.5]5vvvv  & Fx^ & Fxxpkdgp$$IfTl0<404 laT 3344.5\5I6]7^77777788`=a=y=z={===>>>>>??@@[AmACBOBCC7ENEEEEEE߶ujrhfS6CJU hfS6CJjhfS6CJUhfS0JCJOJQJjrhfSUjhfSUjqhfS6UhfSB*ph hfS0J6jqhfS6U hfS6jhfS6UhfS hfSCJ hfS5CJ,]5J6t67*:;<`=@@[AoACBQBBBBFCdCCCCD7E & FAx & F@x & Fx & Fx & Fx  & Fx^7EOE`F{F G"GGGHIIIRJJ1KKKKK$If$If $$Ifa$< & F< & Fx  & Fx^ & Fx  & FTx^T & FxE"F#F`FzF G!GGIQJJJJJJ/K0KKKKKKK)L,LTLULVLdLeLfLhLiLwLxLyLLLLLLLLM M-M.MMMOMPMRMSMUMVMMMMMپɸٳٳ٭٭٢ٗ٭٭٭٭٭٭٭٭٭ّ hfSCJj^yhfSUjxhfSU hfSCJ hfS5 hfS0Jj5shfSUjhfSUhfSB*phhfS hfS5CJ hfSCJjhfS6CJUhfS0J6CJ9KKKKKK$If<$If($If]kdt$$Ifl0F#n( 8i    4 laKKKKK LL?77$IfJkdv$$Ifl0n( i4 la$Ifpkdu$$Ifl\#n(s8i4 laL(L)L*L+L,L $<$Ifa$<$If]kd]w$$IflFn(i    4 la$If,L-L?LSLTLULgLhLPJBJP$If$IfJkdx$$Ifl0n( i4 la$If]kdw$$IflpFn(i    4 lahLzLLLLLLLL~~G7kdz$$Ifln(O)i4 la$If]kdy$$Ifl|F !n( hbi    4 laP$If$Ifd$If$IfLLLLLLLL{7kdU{$$Ifln(O)i4 la$If7kdz$$Ifl0n(O)i4 la<$If$IfLLLLLLLMML7kd|$$Ifln(O)i4 la$If7kd|$$Ifln(O)i4 la$If7kd{$$Ifln(O)i4 laMM M M+M,M-M.ML7kd}$$Ifln(O)i4 la$If7kdN}$$Ifln(O)i4 la$If7kd|$$Ifln(O)i4 la.M/MKMLMMMNMOM$If7kd}~$$Ifln(O)i4 la$If7kd~$$Ifln(O)i4 laOMPMQMRMSMTMUMeJkdy$$Ifl80n(t"i4 la$IfJkd~$$Ifl80n(t"i4 laUMVMcMxMMMMMMB $x$Ifa$]kd$$Ifl F n(( i    4 la $x$Ifa$Jkd$$Ifl0n(t"i4 laMMMMMM $x$Ifa$]kdF$$Ifl F n(( i    4 la $x$Ifa$MMMMMMM0PPPRRRRR-S.S@UAUZU[U\UUUUUVVV VVBVJVVVVVVVVWWWWWXXWXXXµǮ¡Ǯ”Ǯ‡ǮzǮjwhfS6UjhfS6Uj]hfS6UjhfS6U hfS0J6jKhfS6U hfS6jhfS6UhfSfH`q hfS5fH`q hfS5 hfSCJhfS hfSCJ0MMMMMM/]kdh$$Ifl F n(( i    4 la $x$Ifa$ 
$x$Ifa$]kdׁ$$Ifl F n(( i    4 laMMMMNQFQSV~ZZZ\^^^^^^^ & F & F & Fx & F4kd$$Ifl)*4 la($IfXXXXXXXYY>Y?Y@YfYgYYYZZZ%Z&ZZZ&['[M[N[O[j[k[[[[[[\\\\__```M`N` ccظòج؂u؂jɈhfS6UjhfSCJUmHnHujLhfS6UjhfS6U hfSCJ hfS0Jj*hfSUjhfSUjhfS6UhfS hfS0J6jhfS6UjhfS6U hfS6.^^^^^^^^_____a coepeeeeee$ & F 0x$Ifa$ $ & Fxa$$a$ & Fxcpeeeefffffgggg6g7g8gDgEghhmhqhrhhhhhhhhhѸݩѸݕscݕP$jHh;SfhfSOJQJUHh;SfhfS0JOJQJj#hfSOJQJU$jHh;SfhfSOJQJU'hfSOJQJcHdhdhdh;SfjhfSOJQJUhfS0JOJQJj*hfSOJQJUjhfSOJQJUhfSOJQJ hfS5hfS5OJQJhfS56CJhfSeefSfqfffffgdTTTTTTTT & F 0x$Ifkd:$$IfTx44      \ (%b P 0      4 xaf4T gFg`ggggghhhUkdD$$IfTx4      \ (%b P 0      4 xaf4T & F 0x$If h2hVhWhmhhhAiiiBjjR & F 0x$C$Eƀ;SfIf & F 0x$If hiii?i@iAiEiFiliminiiiiiiiiiiiiii(j)j*j@jAjȸȰȸȰpȸȰZȸ*jwHh;SfhfSOJQJU*j֍Hh;SfhfSOJQJU*jKHh;SfhfSOJQJU'hfSOJQJcHdhdhdh;SfhfSOJQJHh;SfhfS0JOJQJ$jHh;SfhfSOJQJU*jHh;SfhfSOJQJUHh;SfhfSOJQJAjBjCjhjijjjjjjjjjjjjkk1k2k3kDkEkFkGkk޷֫֜q[KGhfSHh;SfhfS0JOJQJ*j6Hh;SfhfSOJQJUHh;SfhfSOJQJ$jHh;SfhfSOJQJUhfS0JOJQJjhfSOJQJUjhfSOJQJUHh;SfhfS0JOJQJjhfSOJQJUhfSOJQJ$jHh;SfhfSOJQJUHh;SfhfSOJQJjjkFk & F 0x$IfQ & F 0x$Eƀ;SfIfFkGktkkkklWllmeUUUUUUUU & F 0x$Ifkdː$$IfTx4      \ (%b P 0      4 xaf4T kkkkkklll ll5l6l7lUlVlWl]l^lllllllʴؤ؜{eU{?U*jHh;SfhfSOJQJUHh;SfhfS0JOJQJ*j1Hh;SfhfSOJQJUHh;SfhfSOJQJ$jHh;SfhfSOJQJUhfSOJQJHh;SfhfS0JOJQJ*jHh;SfhfSOJQJUHh;SfhfSOJQJ$jHh;SfhfSOJQJU'hfSOJQJcHdhdhdh;Sfllllllmmm m m5m6m7mLmMmNmOmomʴؤ؜{eUG4G$jHh;SfhfSOJQJUHh;SfhfSOJQJHh;SfhfS0JOJQJ*jHh;SfhfSOJQJUHh;SfhfSOJQJ$jHh;SfhfSOJQJUhfSOJQJHh;SfhfS0JOJQJ*jYHh;SfhfSOJQJUHh;SfhfSOJQJ$jHh;SfhfSOJQJU'hfSOJQJcHdhdhdh;SfmNmmmmmR & F 0x$C$Eƀ;SfIf & F 0x$IfompmqmmmmmLnMnrnsntnnnnnnn׿t`M?Hh;SfhfSOJQJ$jHh;SfhfSOJQJU'hfSOJQJcHdhdhdh;SfHh;SfhfS0JOJQJ*jݕHh;SfhfSOJQJUHh;SfhfSOJQJ$jHh;SfhfSOJQJUhfShfSOJQJHh;SfhfS0JOJQJ$jHh;SfhfSOJQJU*jHh;SfhfSOJQJUmmn5n6nLnnneUUUUUU & F 0x$Ifkd $$IfTx4      \ (%b P 0      4 
xaf4TnnnnnnnnnnoooooGoHoIocodoeofoo׹׹׹|nX|H|n|nHh;SfhfS0JOJQJ*j~Hh;SfhfSOJQJUHh;SfhfSOJQJ$jHh;SfhfSOJQJU'hfSOJQJcHdhdhdh;Sf*jHh;SfhfSOJQJUHh;SfhfSOJQJHh;SfhfS0JOJQJ$jHh;SfhfSOJQJU*jfHh;SfhfSOJQJUnooeoopIR & F 0x$C$Eƀ;SfIf & F 0x$IfR & F 0x$C$Eƀ;SfIfooooooooooooppp,p-p.pYpZpppppq׿tdNdJhfS*j;Hh;SfhfSOJQJUHh;SfhfS0JOJQJ*jHh;SfhfSOJQJUHh;SfhfSOJQJ$jHh;SfhfSOJQJU'hfSOJQJcHdhdhdh;SfhfSOJQJHh;SfhfS0JOJQJ$jHh;SfhfSOJQJU*jHh;SfhfSOJQJUp[pvppp & F 0x$IfR & F 0x$C$Eƀ;SfIfpppqqq[qqqDreUUUUUUUU & F 0x$IfkdΙ$$IfTx4      \ (%b P 0      4 xaf4T q q!qDqEqFqYqZq[qaqbqqqqqqqqqqʴؤ؜{eUB4Hh;SfhfSOJQJ$jHh;SfhfSOJQJUHh;SfhfS0JOJQJ*j$Hh;SfhfSOJQJUHh;SfhfSOJQJ$jHh;SfhfSOJQJUhfSOJQJHh;SfhfS0JOJQJ*jHh;SfhfSOJQJUHh;SfhfSOJQJ$jHh;SfhfSOJQJU'hfSOJQJcHdhdhdh;Sfqqqqqqqr r rrr!r,r2rBrCrDrErirjrkrrrrrrrrrrssPsQs|s׿pa]hfSjBhfSOJQJUjhfSOJQJUhfS0JOJQJHh;SfhfS0JOJQJhfS0JOJQJj4hfSOJQJUjhfSOJQJUhfSOJQJHh;SfhfS0JOJQJ$jHh;SfhfSOJQJU*jHh;SfhfSOJQJU#Drrrrrss#s9s:sUkdם$$IfTx4      \ (%b P 0      4 xaf4T & F 0x$If :sPssstRtttuu,uOu & F 0x$If |s}s~ssssssssssssssstttt?t@tAtPtQtRtStytzt{ttttttttttttttt uuOuҴҥҖ҇xjhfSOJQJUj{hfSOJQJUjhfSOJQJUjahfSOJQJUjҟhfSOJQJUj=hfSOJQJUhfSOJQJhfS0JOJQJjhfSOJQJUjhfSOJQJU/OuPuquuuuuuueUUUUUUU & F 0x$Ifkd$$IfTx4      \ (%b P 0      4 xaf4TOuPuuuQvvvvvvvv%w/wzzz;{<{{()vȎe=>defߡžŸŸ֑hfS56CJ hfS0J6jxhfS6UjhfS6UhfS56CJ hfS>*CJ hfSCJ hfS6CJ hfS5CJhfSCJOJQJ hfS6 hfS0JjhfSUjhfSUhfSOJQJhfS2uuv v.vQveUUUU & F 0x$Ifkdn$$IfTx4      \ (%b P 0      4 xaf4TQvRvSvvxqzzze^\ZZVL <<$Ifx & Fxkd?$$IfTx4      \ (%b P 0      4 xaf4Tzz;{ <<$Ifmkd$$IfTlN ` 04 lap T;{<{{ <<$Ifmkd:$$IfTlN ` 04 lap T{{{{[|J~xmmm  & Fx^  & F x & Fxxmkd٦$$IfTlN ` 04 lap T vȎf>mВғ, & F8x^8` & F8x^8` & F8x^8`x & F>^x^  & Fx^ & FxfߡXgkd$$IfTl40& ``4 laf4pT$   P<<$Ifa$$$xa$x & F 8x^8` & F8x^8`ߡPQɢʢ~  fg{|ܥ5ab٧ڧ>kͩΩѩ妛囑hfSCJOJQJhfSCJOJQJhfS5CJOJQJhfS56CJ hfS0JjhfSUjhfSUjhfSCJUmHnHu hfS6CJ hfSCJhfShfSOJQJ hfS5hfS5OJQJ5PQgɢʢ4Lkd$$IfTl40&4 laf4T$   P <<$If^ `a$Lkd$$IfTl40&4 laf4T   P<<$IfʢˢSLkdE$$IfTl40&4 laf4T   P<<$If$   P<<$Ifa$ 
~<Lkd$$IfTl40&4 laf4T   P<<$If$   P<<$Ifa$Lkd$$IfTl40&4 laf4T~Ѥ   <Lkd$$IfTl40&4 laf4T   P<<$If$   P<<$Ifa$LkdR$$IfTl40&4 laf4T fgh{|ݥ<6<Lkd_$$IfTl40&4 laf4T$   P<<$Ifa$Lkd$$IfTl40&4 laf4T   P<<$Ifݥ6a"#>kl $<<$Ifa$ $x$Ifa$$a$x ͩqfYYYY $<<$Ifa$ $x$Ifa$kd$$IfTl40e&`L ``04 laf4pTͩΩѩL??22 $<x$Ifa$ $<<$Ifa$kd$$IfTl4re7&     04 laf4p T $<x$Ifa$" $<<$Ifa$kd$$IfTl4re7&       (04 laf4p(T$ 0 <x$Ifa$ $<x$Ifa$ $<<$Ifa$%" $<<$Ifa$kdԳ$$IfTl4re7&   `(04 laf4p(T%;CDG]efƪ "#$-.ǫȫ/02~֬׬٬   noܼǶܭ܂ǶzqhfS5OJQJjhfSUjhfSUhfSCJOJQJ hfS5hfS5OJQJhfSOJQJhfS56CJ hfS0JjhfSUjhfSU hfS6CJ hfSCJhfShfSCJOJQJhfSCJOJQJhfS5CJOJQJ,%;=?AC$ 0 <x$Ifa$ $<<$Ifa$CDG]- $<<$Ifa$kd$$IfTl4re7&   04 laf4pT]_ace$ 0 <x$Ifa$efǪ-)'xkd$$IfTl4re7&   `04 laf4pTǫ/bU $<<$Ifa$kd&$$IfTl0l ,"  ``04 lapT $<<$Ifa$$a$/02~t $<<$Ifa$ $<<$Ifa$pkd $$IfTl0l ," 04 laT~֬t $<<$Ifa$ $<<$Ifa$pkd$$IfTl0l ," 04 laT֬׬٬t $<<$Ifa$ $<<$Ifa$pkdS$$IfTl0l ," 04 laTEnѯ]ʳvx~zq$`a$$a$$a$xpkd$$IfTl0l ," 04 laT ,-.WX 89abcdstuwxʴߴ#$'?Z[ٻٻٻٰ٪ٌxٖnhfSCJ OJQJhfS5CJOJQJhfS5OJQJhfSCJOJQJhfS5CJOJQJhfS56CJ hfSCJjhfSU hfS5 hfS0JjhfSUjhfSUhfS hfS0J6jhfS6U hfS6jhfS6U+ߴ #ikd$$IfTl40e&L ``4 laf4pT $<<$Ifa$#$'?DIRZwjj____ $x$Ifa$ $<<$Ifa$kd $$IfTl4re6&4 laf4TZ[e<//$ $x$Ifa$ $<<$Ifa$kd#$$IfTl4re6&  24 laf4p2T[eĵ!#=?@0BVWXvwy:;?lܹ{nhYR+hYR+0J)OJQJhfS0J)OJQJjhfS0J)OJQJUjhfS0J)CJOJQJU hfS5hfSOJQJhfS5OJQJhfS56CJjhfSCJUmHnHuhfSCJOJQJhfShfSCJOJQJhfSCJ OJQJhfS5CJOJQJ+1kd$$IfTl4re6&  24 laf4p2T $x$Ifa$ĵ͵ֵ޵ $x$Ifa$ $<<$Ifa$#+<//$ $x$Ifa$ $<<$Ifa$kd$$IfTl4re6&       24 laf4p2T+38=>?ABGEEEkd$$IfTl4re6&      4 laf4pT $x$Ifa$BCDEFa0BV$ & F<<$Ifa$$ & Fa$ & Fx VWXZv{jjj$ & F<<$Ifa$kd!$$IfTlF_%efl0    4 laTvwy{{jjj$ & F<<$Ifa$kd$$IfTlF_%efl0    4 laT{jjj$ & F<<$Ifa$kd$$IfTlF_%efl0    4 laT!:{jjj$ & F<<$Ifa$kd3$$IfTlF_%efl0    4 laT:;?Cl{jjj$ & F<<$Ifa$kd$$IfTlF_%efl0    4 laTlmnѻ9D{ttpnttttee  & Fxxx & Fxkd$$IfTlF_%efl0    4 laT ɽʽоѾ'(PQR   +,STU^_ِٰٛم}shfSCJOJQJhfSOJQJj[hfSUjhfSUhfSB*phj?hfS6UjhfSCJUmHnHu hfS0JjhfSUjhfSUhfS hfS0J6j7hfS6U hfS6jhfS6U-yg]g  & F<<$If & Fxxvjj  & 
Fx$Ifkd$$Ifl*0 $n  04 lap !PQVW|}~z{@ACWmnҹͬҹ͟ҹ͒ҹͅҹjRhfS6UjhfS6UjnhfS6UjhfS6U hfS0J6jThfS6U hfS6jhfS6UhfSB*CJOJQJphhfSCJOJQJhfSOJQJhfS1xll  & Fx$Ifkd$$Ifl0 $n  ``04 lapxjj  & F<<$Ifkd$$Ifl0 $n  @@04 lap zVWunnlhhhhhhx & Fxkdk$$Ifl40 $n  04 laf4p Wmnpzx$If_kd$$Ifl4    .V t0  4 laD f4 $x$Ifa$nopz{}~UVopq_`}~߿߿ߌtnfhfSB*ph hfS0Jj hfSB*UphhfSB*phjhfSB*UphjQ hfS6U hfS0J6j hfS6U hfS6jhfS6U jMhfSOJQJ jhfSOJQJhfShfSCJOJQJhfSOJQJ jOhfSOJQJ'z{~x$Ifokd^$$Ifl   0 :.b t0  4 laD x$Ifokd $$Ifl   0 :.b t0  4 laD x$Ifokd$$Ifl   0 :.b t0  4 laD ;.qq$ @x$Ifa$$$xa$xokdC $$Ifl   0 :.b t0  4 laD ./=>BClmn֫š֍vnfZfKZjhfSOJQJUjhfSOJQJUhfSOJQJhfSOJQJ jOhfSCJOJQJhfSCJOJQJ jhfSCJOJQJ!hfS0JCJOJQJmH nH u,jR hfSCJOJQJUmH nH u&jhfSCJOJQJUmH nH uhfSCJOJQJmH nH uhfS hfS5hfS5OJQJhfS56CJ./048=wwww$ @x$Ifa$vkd= $$IfTs4    0$W0    4 saf4T=>cR@@@$ @x$Ifa$ @x$Ifkd( $$IfTs4    \}$TU0    4 saf4TcTBBB$ @x$Ifa$ @x$Ifkd $$IfTs4    \}$TU0    4 saf4T  456VWXZ[]^`abhijzhfSOJQJU jMhfSCJOJQJjhfSOJQJUjdhfSOJQJUhfSOJQJhfSCJOJQJ jhfSCJOJQJhfSOJQJjhfSOJQJUhfS0JOJQJ4X[^acTBBB$ @x$Ifa$ @x$Ifkdr$$IfTs4    \}$TU0    4 saf4TabcTBBB$ @x$Ifa$ @x$Ifkd$$IfTs4    \}$TU0    4 saf4TcTBBB$ @x$Ifa$ @x$Ifkd$$IfTs4    \}$TU0    4 saf4T%&UVWmnopqrsuvw}~45678پʹ٧ᖎ}٧nʹٚjhfSOJQJU hfS0Jj|hfSUjhfSUhfS jhfSCJOJQJ jMhfSCJOJQJhfS0JOJQJjhfSOJQJUjhfSOJQJUhfSOJQJhfSOJQJhfSCJOJQJ jhfSCJOJQJ+oqsvcTBBB$ @x$Ifa$ @x$Ifkd$$IfTs4    \}$TU0    4 saf4TvwcTBBB$ @x$Ifa$ @x$Ifkd$$IfTs4    \}$TU0    4 saf4T68:<cTBBB$ @x$Ifa$ @x$Ifkd $$IfTs4    \}$TU0    4 saf4T89:;<=CDopq%CDERSYZپʹ٧ًʹ٧ytyt hfS5hfS5OJQJhfS56CJj hfSOJQJU jOhfSCJOJQJ jhfSCJOJQJhfS0JOJQJjhfSOJQJUjhfSOJQJUhfSOJQJhfSOJQJhfSCJOJQJ jhfSCJOJQJ,<=cTBBB$ @x$Ifa$ @x$Ifkd$$IfTs4    \}$TU0    4 saf4TcTBBB$ @x$Ifa$ @x$Ifkd$$IfTs4    \}$TU0    4 saf4T%/Cc^LL$ @x$Ifa$$a$kd$$IfTs4    \}$TU0    4 saf4TCDEIMRwwww$ @x$Ifa$vkd$$IfTl4      0$W0      4 laf4TRScTTBBB$ @<$Ifa$ @<$Ifkdx$$IfTl4      \$TU0      4 laf4T&'(stuwxyz{|}ҶuҶfj#hfSOJQJUj0!hfSOJQJU hfS5 jOhfSCJOJQJhfSCJOJQJ jhfSCJOJQJhfS5OJQJ 
jhfSCJOJQJjLhfSOJQJUhfSOJQJhfS0JOJQJjhfSOJQJUjhfSOJQJU(uxz|cTBBB$ @<$Ifa$ @<$Ifkd$$IfTl4      \$TU0      4 laf4T|}cTBBB$ @<$Ifa$ @<$Ifkd!$$IfTl4      \$TU0      4 laf4T !'(OPQowjjfWMhfS0JOJQJjl&hfSOJQJUhfS jhfSCJOJQJ jMhfSCJOJQJ!hfS0JCJOJQJmH nH u,j$hfSCJOJQJUmH nH u&jhfSCJOJQJUmH nH uhfSCJOJQJmH nH u hfS5hfSCJOJQJ jhfSCJOJQJhfSOJQJjhfSOJQJU cR@@@$ @<$Ifa$ @<$Ifkd#$$IfTl4      \$TU0      4 laf4T !qsvxcTBBB$ @<$Ifa$ @<$Ifkdu%$$IfTl4      \$TU0      4 laf4Topqrsuvwxy,-./023456=>fgh~j*hfSOJQJUjf)hfSOJQJUhfS0JOJQJj'hfSOJQJUhfS jhfSCJOJQJhfSCJOJQJ jhfSCJOJQJhfSOJQJjhfSOJQJU7xycTBBB$ @<$Ifa$ @<$Ifkd&$$IfTl4      \$TU0      4 laf4T.035cTBBB$ @<$Ifa$ @<$Ifkd}($$IfTl4      \$TU0      4 laf4T56cTBBB$ @<$Ifa$ @<$Ifkd *$$IfTl4      \$TU0      4 laf4TcTBBB$ @<$Ifa$ @<$Ifkd+$$IfTl4      \$TU0      4 laf4T>?@ABDEFGHNOz{|ŻŻŻŻŻwjhfSUjZ/hfSOJQJUj-hfSOJQJUhfS jOhfSCJOJQJ jhfSCJOJQJhfSCJOJQJ jhfSCJOJQJhfS0JOJQJjl,hfSOJQJUhfSOJQJjhfSOJQJU.@BEGcTBBB$ @<$Ifa$ @<$Ifkd,$$IfTl4      \$TU0      4 laf4TGHcTBBB$ @<$Ifa$ @<$Ifkdq.$$IfTl4      \$TU0      4 laf4T  cTBBB$ @<$Ifa$ @<$Ifkd/$$IfTl4      \$TU0      4 laf4T    8VWXelmǺǺǶޤ추ǺǺǶ춉Ǻ|Ƕ jhfSCJOJQJj5hfSUjL4hfSU hfS5hfS5OJQJhfS56CJhfS jhfSCJOJQJhfSCJOJQJ jMhfSCJOJQJhfSOJQJ hfS0JjhfSUj0hfSU0 8BVc[[II$ `@ x$Ifa$$$xa$kdW1$$IfTl4      \$TU0      4 laf4TVWX\`e|jjj$ `@ x$Ifa$ `@ x$IfskdN2$$IfTl4      0 $"40      4 laf4Tefe_MMM$ `@ x$Ifa$$Ifkd03$$IfTl4      \ x:$"0      4 laf4Te_MMM$ `@ x$Ifa$$Ifkd4$$IfTl4      \ x:$"0      4 laf4T358:eVDDD$ `@ x$Ifa$ `@ x$Ifkd66$$IfTl4      \ x:$"0      4 laf4T12345789:@Ajkl45678:;<=DEno˾˾˾˾j<hfSUjG;hfSUj9hfSUjw8hfSU jhfSCJOJQJhfSCJOJQJ jhfSCJOJQJhfS hfS0JjhfSUj7hfSU9:;eVDDD$ `@ x$Ifa$ `@ x$Ifkd7$$IfTl4      \ x:$"0      4 laf4TeVDDD$ `@ x$Ifa$ `@ x$Ifkd9$$IfTl4      \ x:$"0      4 laf4T68;=eVDDD$ `@ x$Ifa$ `@ x$Ifkdq:$$IfTl4      \ x:$"0      4 laf4T=>eVDDD$ `@ x$Ifa$ `@ x$Ifkd;$$IfTl4      \ x:$"0      4 laf4Top-./012356=>defuvwxyz{}~j@hfSUjz?hfSU jMhfSCJOJQJhfSOJQJj>hfSU jhfSCJOJQJhfSCJOJQJ jhfSCJOJQJhfS 
hfS0JjhfSU7eVDDD$ `@ x$Ifa$ `@ x$IfkdA=$$IfTl4      \ x:$"0      4 laf4T/136eVDDD$ `@ x$Ifa$ `@ x$Ifkd>$$IfTl4      \ x:$"0      4 laf4T67wy{~eVDDD$ `@ x$Ifa$ `@ x$Ifkd @$$IfTl4      \ x:$"0      4 laf4T~eVDDD$ `@ x$Ifa$ `@ x$IfkdrA$$IfTl4      \ x:$"0      4 laf4T#$%&')*+,45[\]~˾˾⠓˾jsFhfSU jMhfSCJOJQJhfSOJQJjEhfSUjChfSU jhfSCJOJQJhfSCJOJQJ jhfSCJOJQJhfS hfS0JjhfSUjHBhfSU5%'*,eVDDD$ `@ x$Ifa$ `@ x$IfkdB$$IfTl4      \ x:$"0      4 laf4T,-eVDDD$ `@ x$Ifa$ `@ x$Ifkd8D$$IfTl4      \ x:$"0      4 laf4TeVDDD$ `@ x$Ifa$ `@ x$IfkdE$$IfTl4      \ x:$"0      4 laf4TEFGHIJKLMNUV~ !#$&'()+򷫷zjJhfSU jMhfSCJOJQJhfS0JOJQJj[IhfSOJQJUjhfSOJQJUhfSOJQJ jhfSCJOJQJ jhfSCJOJQJ hfS0JjGhfSUjhfSUhfShfSCJOJQJ-GIKMeVDDD$ @x$Ifa$ `@ x$Ifkd G$$IfTl4      \ x:$"0      4 laf4TMNeVDDD$ @x$Ifa$ @x$IfkdwH$$IfTl4      \ x:$"    0      4 laf4T!$')eVDDD$ `@ x$Ifa$ `@ x$IfkdI$$IfTl4      \ x:$"0      4 laf4T)*+,V`teWLG55$  x$Ifa$$a$   x $  xa$kdKK$$IfTl4      \ x:$"0      4 laf4T+,-Vtuv!"#$%&'-.RϴêϝܝܝώêρwwhfSCJOJQJ jhfSCJOJQJjOhfSOJQJU jhfSCJOJQJhfS0JOJQJj-NhfSOJQJUjhfSOJQJUhfSOJQJ hfS5hfS5OJQJhfS56CJhfShfSOJQJmH nH u)tuvz~yyyy$  x$Ifa$skd/L$$IfTl4      0 $"40      4 laf4TeVDDD$  x$Ifa$  x$IfkdM$$IfTl4      \ x:$"0      4 laf4T"$&eVDDD$  x$Ifa$  x$IfkdN$$IfTl4      \ x:$"0      4 laf4T&'acfieVDDD$  x$Ifa$  x$IfkdwP$$IfTl4      \ x:$"0      4 laf4TRST_`abcefhipq()ŻŻŻŻŻ~ŻŻsjUhfSUjThfSUhfSOJQJ jhfSCJOJQJ hfS0Jj6ShfSUjhfSUhfShfSCJOJQJ jhfSCJOJQJhfSOJQJhfS0JOJQJjhfSOJQJUjQhfSOJQJU-ijeVDDD$  x$Ifa$  x$IfkdR$$IfTl4      \ x:$"0      4 laf4TeVDDD$  x$Ifa$  x$IfkdS$$IfTl4      \ x:$"0      4 laf4TCEHJeVDDD$  x$Ifa$  x$Ifkd"U$$IfTl4      \ x:$"0      4 laf4T)*ABCDEGHIJKQR}~  ./0jklnopqrstz{j'ZhfSUjXhfSUj[WhfSUhfSOJQJ jhfSCJOJQJhfSCJOJQJ jhfSCJOJQJhfS hfS0JjhfSU;JKeVDDD$ `@ x$Ifa$  x$IfkdV$$IfTl4      \ x:$"0      4 laf4TeVDDD$ `@ x$Ifa$  x$IfkdW$$IfTl4      \ x:$"0      4 laf4TloqseVDDD$  x$Ifa$  x$IfkdQY$$IfTl4      \ x:$"0      4 laf4TsteVDDD$  x$Ifa$  x$IfkdZ$$IfTl4      \ x:$"0      4 laf4T 
:;<OPQRSUVWXY`a˾˾˶˾˞˶˾˞˶˞j_hfSUjF^hfSU jhfSCJOJQJj\hfSUhfSOJQJ jhfSCJOJQJhfSCJOJQJ jMhfSCJOJQJhfS hfS0JjhfSUj[hfSU5eVDDD$  x$Ifa$  x$Ifkd\$$IfTl4      \ x:$"0      4 laf4T QSVXeVDDD$  x$Ifa$  x$Ifkdp]$$IfTl4      \ x:$"0      4 laf4TXYeVDDD$ @x$Ifa$  x$Ifkd^$$IfTl4      \ x:$"0      4 laf4T89:lmnopqrtuvݷݬݟᖍzpzjhfS@U hfS@hfSOJQJ hfS5hfS5OJQJhfS56CJ jMhfSCJOJQJjmbhfSU jhfSCJOJQJ hfS0JjahfSUjhfSUhfShfSOJQJ jOhfSCJOJQJhfSCJOJQJ+eVDD2$ @x$Ifa$$  x$Ifa$  x$Ifkd0`$$IfTl4      \ x:$"0      4 laf4TnprueVDDD$ @x$Ifa$  x$Ifkda$$IfTl4      \ x:$"0      4 laf4Tuvwe]]II$  x$Ifa$$$xa$kdb$$IfTl4      \ x:$"0      4 laf4Twwww$  x$Ifa$skdc$$IfTl4      0 $"F0      4 laf4T  eT@@@$  x$Ifa$  x$Ifkdd$$IfTl4      \ x:$"0      4 laf4T    CDEXYZ[\^_`ahiŸŸܰܥŸܰܔܸŸŸܰ܉ܸŸŸjihfSUj8hhfSU hfS0JjfhfSUjhfSU jhfSCJOJQJhfSCJOJQJ jhfSCJOJQJhfShfS0J@jhfS@UjehfS@U6Z\_aeT@@@$  x$Ifa$  x$Ifkdf$$IfTl4      \ x:$"0      4 laf4TabeT@@@$  x$Ifa$  x$Ifkdpg$$IfTl4      \ x:$"0      4 laf4TeT@@@$  x$Ifa$  x$Ifkdh$$IfTl4      \ x:$"0      4 laf4TEGIKeT@@@$  x$Ifa$  x$Ifkdj$$IfTl4      \ x:$"0      4 laf4T*+,CDEFGHIJKLRSstuEFGמדצמׂjnhfSU hfS0JjzmhfSUjhfSU jhfSCJOJQJj/lhfS@UhfSCJOJQJ jhfSCJOJQJhfShfS0J@jjhfS@UjhfS@U hfS@3KLeT@@@$  x$Ifa$  x$Ifkdgk$$IfTl4      \ x:$"0      4 laf4TeT@@@$  x$Ifa$  x$Ifkdl$$IfTl4      \ x:$"0      4 laf4TGILNeT@@@$  x$Ifa$  x$Ifkdn$$IfTl4      \ x:$"0      4 laf4TGHIKLMNUV   3 4 5 6 7 8 9 : ; B C t u v     пزاп؜пؑпزj/thfSUjrhfSUjqhfSU jMhfSCJOJQJ hfS0Jj*phfSUjhfSUhfS jhfSCJOJQJhfSCJOJQJ jhfSCJOJQJ9NOeT@@@$  x$Ifa$  x$Ifkdbo$$IfTl4      \ x:$"0      4 laf4TeT@@@$  x$Ifa$  x$Ifkdp$$IfTl4      \ x:$"0      4 laf4T5 7 9 ; eT@@@$  x$Ifa$  x$Ifkdr$$IfTl4      \ x:$"0      4 laf4T; <     eT@@@$  x$Ifa$  x$Ifkdgs$$IfTl4      \ x:$"0      4 laf4T           , . 
/ 0 1 2 3 L N O P Q R S u v w y z | }                             4 5 6 8 9 : ; t u v w  jhfSCJ hfSCJ jhfSCJ hfS5hfS56CJhfS jhfSCJOJQJ jMhfSCJOJQJhfSCJOJQJF      eYTBB$ ` px$Ifa$$a$  xkdt$$IfTl4      \ x:$"0      4 laf4T      yyyy$ ` px$Ifa$skdu$$IfTl4      0 $"40      4 laf4T  , / 1 3 eVDDD$ ` px$Ifa$ ` px$Ifkdv$$IfTl4      \ x:$"0      4 laf4T3 4 L O Q S eVDDD$ ` px$Ifa$ ` px$Ifkd^w$$IfTl4      \ x:$"0      4 laf4TS T u w z } eVDDD$ ` px$Ifa$ ` px$Ifkd4x$$IfTl4      \ x:$"0      4 laf4T} ~     eVDDD$ ` px$Ifa$ ` px$Ifkd y$$IfTl4      \ x:$"0      4 laf4T      eVDDD$ ` px$Ifa$ ` px$Ifkdy$$IfTl4      \ x:$"0      4 laf4T      eVDDD$ ` px$Ifa$ ` px$Ifkdz$$IfTl4      \ x:$"0      4 laf4T      eVDDD$ ` px$Ifa$ ` px$Ifkd{$$IfTl4      \ x:$"0      4 laf4T  4 6 9 ; eVDDD$ ` px$Ifa$ ` px$Ifkdb|$$IfTl4      \ x:$"0      4 laf4T; < t v x z eVDDD$ ` px$Ifa$ ` px$Ifkd8}$$IfTl4      \ x:$"0      4 laf4Tw x y z                             6 7 8 9 : ; < a b c d e f g                             4 6 7 8 9 : ; = a hfS56CJ jMhfSCJ jhfSCJhfS jhfSCJ hfSCJRz {     eVDDD$ ` px$Ifa$ ` px$Ifkd~$$IfTl4      \ x:$"0      4 laf4T      eVDDD$ ` px$Ifa$ ` px$Ifkd~$$IfTl4      \ x:$"0      4 laf4T      eVDDD$ ` px$Ifa$ ` px$Ifkd$$IfTl4      \ x:$"0      4 laf4T      eVDDD$ ` px$Ifa$ ` px$Ifkd$$IfTl4      \ x:$"0      4 laf4T  6 8 : < eVDDD$ ` px$Ifa$ ` px$Ifkdf$$IfTl4      \ x:$"0      4 laf4T< = a c e g eVDDD$ ` px$Ifa$ ` px$Ifkd<$$IfTl4      \ x:$"0      4 laf4Tg h     eVDDD$ ` px$Ifa$ ` px$Ifkd$$IfTl4      \ x:$"0      4 laf4T      eVDDD$ ` px$Ifa$ ` px$Ifkd$$IfTl4      \ x:$"0      4 laf4T      eVDDD$ ` px$Ifa$ ` px$Ifkd$$IfTl4      \ x:$"0      4 laf4T      eVDDD$ ` px$Ifa$ ` px$Ifkd$$IfTl4      \ x:$"0      4 laf4T  4 7 9 ; eVDDD$ ` px$Ifa$ ` px$Ifkdj$$IfTl4      \ x:$"0      4 laf4T; < = a k  eaYHH$ p x$Ifa$$$xa$xkd@$$IfTl4      \ x:$"0      4 laf4Ta k                                0123456NOPRSTUghiklno jMhfSCJOJQJ jhfSCJOJQJhfSCJOJQJ jhfSCJOJQJhfS hfS5hfS5OJQJJ      zzzz$ p 
x$Ifa$skd$$$IfTl4      0 D%"0      4 laf4T      eWFFF$ p x$Ifa$ p x$Ifkd$$IfTl4      \ x:D%" 0      4 laf4T      eWFFF$ p x$Ifa$ p x$Ifkd܉$$IfTl4      \ x:D%" 0      4 laf4T      eWFFF$ p x$Ifa$ p x$Ifkd$$IfTl4      \ x:D%" 0      4 laf4T      eWFFF$ p x$Ifa$ p x$Ifkd$$IfTl4      \ x:D%" 0      4 laf4T  eWFFF$ p x$Ifa$ p x$Ifkd^$$IfTl4      \ x:D%" 0      4 laf4T0246eWFFF$ p x$Ifa$ p x$Ifkd4$$IfTl4      \ x:D%" 0      4 laf4T67NPSUeWFFF$ p x$Ifa$ p x$Ifkd $$IfTl4      \ x:D%" 0      4 laf4TUVgiloeWFFF$ p x$Ifa$ p x$Ifkd$$IfTl4      \ x:D%" 0      4 laf4TopeWFFF$ p x$Ifa$ p x$Ifkd$$IfTl4      \ x:D%" 0      4 laf4TeWFFF$ p x$Ifa$ p x$Ifkd$$IfTl4      \ x:D%" 0      4 laf4TeWFFF$ p x$Ifa$ p x$Ifkdb$$IfTl4      \ x:D%" 0      4 laf4T FGH[\]^_abdejk пп}u}uhfSOJQJhfS5@OJQJhfS56CJ hfS@jhfSCJUmHnHuj}hfSU jMhfSCJOJQJ hfS0JjhfSUjhfSU jhfSCJOJQJhfSCJOJQJ jhfSCJOJQJhfS.eWFFF$ p x$Ifa$ p x$Ifkd8$$IfTl4      \ x:D%" 0      4 laf4T]_beeWFFF$ p x$Ifa$ p x$Ifkd$$IfTl4      \ x:D%" 0      4 laf4Tefhj"D=ecaa]]]][]xkd$$IfTl4      \ x:D%" 0      4 laf4T = 3d^QQ $<<$Ifa$kd$$IfTl0H$ ``04 lapT $<<$Ifa$$a$x demnP "$$$$$$$^%%%%&&'''''((({)(,),Q,R,S,,,,-(hfSeh fH`q r jGhfS6UhfSOJQJhfSCJOJQJjhfS6U hfS@ hfS0J6jhfS6U hfS6jhfS6UhfShfSCJOJQJ1de $<<$Ifa$pkd$$IfTl0H$04 laTm $<<$Ifa$pkd$$IfTl0H$04 laTmn $<<$Ifa$pkda$$IfTl0H$04 laTP $<<$Ifa$pkd$$IfTl0H$04 laTPQRr sw? }qi Lx`L & F 8x & F 8xxpkdř$$IfTl0H$04 laT ? 
d  !"#s#]%^%g%%x$If $x$Ifa$xx & Fx %%%&xmex$If $x$Ifa$kd$$Ifl0$X  04 lap&&&'xmex$If $x$Ifa$kdz$$Ifl0$X 04 lap''''x$If $x$Ifa$lkdO$$Ifl0$X 04 la''((x$If $x$Ifa$lkd$$Ifl0$X 04 la((({)xmex$If $x$Ifa$kd$$Ifl0$X 04 lap{)|)}))~*(,, -5-.)011xtpttttlttthxxxxkdd$$Ifl0$X  04 lap --1444555)5*5555BBD)D+DMDEEFFHHI#L$LOLPLQLwLxLzLLLLLLLLL妠及}vkjhfSU hfS0J6jchfS6U hfS6jhfS6U hfS6CJ hfSCJ hfS5CJ(hfSeh fH`q r hfS0Jj֠hfSUjhfSUhfSCJOJQJhfS+hfS5eh fH`q r *1a11!334c4455 6,72:< >@BB+DND)EEEFFx^  & Fx^ & Fxx & F xFHI KKzLLL~NPS@U$VAVwVuWXXA[S[p\\^_`a  & Fx^ & Fxxx & FxLLSS$V?ij!XYæĦΦϦ]^fg%&bҵũ}j?hfSUjbhfSUjhfSUjhfSUhfS0JCJOJQJjhfSB*UphjhfSB*UphhfSB*phhfSB*ph hfS0JjhfSUhfSjhfSU/0!>&:KJ*ְϳHv5A^μx & FL & FKbcdèĨŨߨ-.89xyz&'[\];<JKܵܪܟܔ܉jٲhfSUjlhfSUj[hfSUj2hfSUjhfSUjhfSU hfS0Jj3hfSUhfShfS0JCJOJQJjhfSUjdhfSU6#$ְ}345?@AY]`ͼμklyqkyqk hfSCJhfS@CJhfS5@CJ hfS@hfSOJQJhfS56CJ hfSCJ+hfS5eh fH`q r (hfSeh fH`q r jhfSUhfSB*ph hfS6 hfS0JjhfSUhfSjhfSU*μ#klν9kdx$$IfTl4&`'   4 laf4T$ @ x$Ifa$ @ x$If$a$ XѾyjjjjX$ @ x$Ifa$ @ x$Ifkd$$IfTl4rtT4&  4 laf4TĿ=m3ywuqqoqkqqxxkd$$IfTl4rtT4&       4 laf4T  E%,/ntu & Fx & F 8Tx^T & Fx  & Fx^$ & F 0x*$a$ %)',CO -vR^8!Z!$$%%%7%8%6';)<)i)j)޻ޮޮޮޮޮޮޡށtj*hfS6UjhfS6U hfS0JjhfSB*UphjhfSB*Uph hfS5hfSB*phhfSB*ph hfS@ hfS6hfScHdhdhdhzRhfS hfS5CJ hfS6CJ hfSCJhfS5@CJ-u  Bl|$' - & Fx & F 8x^`  & F *xx & Fx-@XhWt 5   f 2  $X]x & Fx & Fx & Fx]^ & FxvRMpBUe}8! 8x^` & FQ & FP & FR8!Z!#$7'r'())~+R./$233344L5e555626  & Fx^ & Fx & Fx & F & Fx & Fxj)k))))O*Z*[*_*c*k*0033344L5d562677#9%999 ::;;<<<v==Z>e>>>? ?y@@xCCjDkDDDDDDĮ桛jøhfS6U hfSCJ hfSCJ hfS5CJ hfS@+hfS5eh fH`q r (hfSeh fH`q r hfS5hfSB*phhfS hfS6 hfS0J6jhfS6U526|666L777L88#99 :\::;{;;<<<1=v===(>Z>e>> & Fx  & Fx^>>? ?? 
@y@@xCC0EBEFF%HBIYIL#L?LM$OO^$ & F 0x*$a$ & F & Fx & Fx & F  & Fx^ & FxD0EBEEE FFFHFIFJFFFBIYILL"L$O%OMONOOOOOOOOyZZZZZZZR[[[\[[[k\v\\\\\eeeʗ|hfSCJOJQJ hfS@jhfSUjhfSU+hfS5eh fH`q r (hfSeh fH`q r hfS0J hfS0J6j`hfS6U hfS6jhfS6UhfS5@CJhfS0OOO>QZRSfUVWWWWX?X[XYyZZZ ]^`bce*ghj & Fx & FT & Fxeeeeejjjjjww x x x+x,xdxxg ~ ѪҪɳޭޥޚމމމމމމމށ|ojhfS6U hfS6hfSB*ph hfS5hfS0JCJOJQJj hfSUjhfSU hfS@+hfS5eh fH`q r (hfSeh fH`q r hfS hfS0J6jhfS6Uj|hfS6U)jjj0lFl[lxlllllll mm.mAmoqs uwwdxxy & FG & Fx & FM & FM & F & Fxyy4{|}~f2QLĈrZgӌ=f~ & FH & FG~>ޏ#Rݐ(8Gd X~՗Tw2x & FI & FHxʙn7Л/CyМݜ7SYv & FJ & FI&fHӢ'vţHԤخx^ & FOxx & Fx & FJ$خuIJغٺ  >?@efɻʻ˻BCDhiμϼм  3EխjhfSUjhfSUjyhfSUhfS0JCJOJQJjhfSU hfS0JjGhfSUjhfSU hfS6 hfS5hfS hfS6@ hfS@ hfS5@0خ¯f\>\=9}>ںgj "3 & FO & FOxxx^ & FOx"d|]uy @X^_l(Luѻ!jhfSCJOJQJUjhfSCJOJQJUhfSCJOJQJhfS5CJOJQJ hfS@ hfS5hfS+hfS5eh fH`q r (hfSeh fH`q r 7z_(uF & Fx & Fx$xa$ & Fx & FOmtqr4ܸܴytgy`yZRhfSB*ph hfSCJ hfS0J6jhfS6U hfS6jhfS6U+hfS5eh fH`q r (hfSeh fH`q r hfS@hfSjhfSUmHnHuhfS5CJOJQJhfS6CJOJQJhfSCJOJQJjhfSCJOJQJUhfS0JCJOJQJ OZt 0?qL & F;x & F:x & F & Fx & Fx$ & Fx & Fx.k?r4Mox^ & Fxx & Fx & Fx & F"#NOPmnst$%*+NOPhi HIJYZAjphfSU hfS@jhfSUjdhfSUjhfSUjRhfSUjhfSU hfS0Jj2hfSUjhfSUhfShfSB*ph7&jI&OO*A & Fx & Fx & Fx & Fx & Fx^ABC~34[\]AY]  v w         )*ҹܤ܆yҹlҹܤddhfSB*phjhfS6UjhfS6UhfSB*ph+hfS5eh fH`q r (hfSeh fH`q r hfS0J6jhfS6U hfS6jhfS6UhfShfS0JCJOJQJjhfSUj hfSU'A^ 3 a@*el & FY x^`  & F x & Fx & F & p@ P !x & Fx & Fx & F p@ !x})*+\]vwx1 2 3 4         ! ! 
!'!(!)!ùykjhfSCJOJQJUjhfSCJOJQJUjnhfSCJOJQJUjhfSCJOJQJUjhfSCJOJQJUhfSOJQJhfSCJOJQJj:hfSCJOJQJU hfS0J6jhfS6UjhfS6UhfSB*ph hfS6hfS%_w2 3   |lkdZ$$Ifl0H$04 la$ & Fx$Ifa$ & Fx   !(!$ & Fx$Ifa$lkd$$Ifl0H$04 la(!)!!J"$ & Fx$Ifa$lkd$$Ifl0H$04 la)!*!!!!I"J"K"L"v"w"x"""|&}&&&&&&&&&%'&'++++++Ϳ߱ͧyc^ͧQj#hfS6U hfS5+hfS5eh fH`q r (hfSeh fH`q r hfS0J6jn"hfS6U hfS6jhfS6UjZhfSCJOJQJUjhfSCJOJQJUhfSjhfSCJOJQJUhfSOJQJhfSCJOJQJj&hfSCJOJQJUJ"K"w""$ & Fx$Ifa$lkdF$$Ifl0H$04 la"""R#z####%&&d & F & p@ P !x  & F x & Fxlkd!$$Ifl0H$04 la &''( *C*Y***h+u+++',-T.|...$ & F @ x$If`a$ & F @ x$If$a$ & F9 x & F9 8x & F9 *x$ & Fx & Fx & Fx++U.|...........////*/+/8/9/N/O/i/j/////////////////000a0b0c00001111111ùùùùj;0hfS6U hfS0J hfS0J6j/hfS6U hfS6hfSCJOJQJ hfSCJhfS>*CJOJQJhfSOJQJmH nH uhfSOJQJhfS56CJhfSjhfS6U7......v___ & F @ x$If` & F @ x$Ifukd#$$IfTl4       0T!`$0      4 laf4T......cP:::$ & F @ x$Ifa$ & F @ x$Ifkd1$$$IfTl4      \\)|T! $qS 0      4 laf4T......eR<<<$ & F @ x$Ifa$ & F @ x$Ifkd%$$IfTl4      \)|T!$qS 0      4 laf4T......eR<<<$ & F @ x$Ifa$ & F @ x$Ifkd%$$IfTl4      \)|T!$qS 0      4 laf4T....//eR<<<$ & F @ x$Ifa$ & F @ x$Ifkd&$$IfTl4      \)|T!$qS 0      4 laf4T//////eR<<<$ & F @ x$Ifa$ & F @ x$Ifkdh'$$IfTl4      \)|T!$qS 0      4 laf4T//%/'/)/*/eR<<<$ & F @ x$Ifa$ & F @ x$Ifkd0($$IfTl4      \)|T!$qS 0      4 laf4T*/+/4/5/7/8/eR<<<$ & F @ x$Ifa$ & F @ x$Ifkd($$IfTl4      \)|T!$qS 0      4 laf4T8/9/K/L/M/N/eR<<<$ & F @ x$Ifa$ & F @ x$Ifkd)$$IfTl4      \)|T!$qS 0      4 laf4TN/O/e/g/h/i/eR<<<$ & F @ x$Ifa$ & F @ x$Ifkd*$$IfTl4      \)|T!$qS 0      4 laf4Ti/j/}////eR<<<$ & F @ x$Ifa$ & F @ x$IfkdP+$$IfTl4      \)|T!$qS 0      4 laf4T//////eR<<<$ & F @ x$Ifa$ & F @ x$Ifkd,$$IfTl4      \)|T!$qS 0      4 laf4T////eV9kd-$$IfTl4 laT$ & Fx$Ifa$kd,$$IfTl4      \)|T!$qS 0      4 laf4T/////////ILkdb.$$IfTl04 laTLkd-$$IfTl04 laT$ & F<$Ifa$$ & F<$Ifa$/////1h22LEE> & Fx & FxLkd2/$$IfTl04 laT$ & F<$Ifa$  & F<$IfLkd.$$IfTl04 laT2334045~66667P7ulkdA$$Ifl0,"[=04 lax$Ifx $ & Fxa$ & Fx 
1@4A4}4~666666677P7Q7R777788898d8e88F:^:b:4<ԪԎr]G+hfS5eh fH`q r (hfSeh fH`q r jlnhfSCJOJQJUjehfSCJOJQJUj\hfSCJOJQJUj*ThfSCJOJQJUjKhfSCJOJQJUjxBhfSCJOJQJUjV9hfSCJOJQJUhfSCJOJQJj0hfSCJOJQJUhfSB*phhfSB*phhfSP7Q7778x$IflkdS$$Ifl0,"[=04 la7888d88x$IflkdJe$$Ifl0,"[=04 la8889F:c::4<Q<v<J>i>>rnx & p@ P !xxxlkdv$$Ifl0,"[=04 la 4<L<P<J>b>h>>>>>>?? A A9A:A;AbAcA BBBUCVCoCpCqCCCDDDIIMMvQQSSTBTTTµǮ¡Ǯљ„Ǯ}w}w}w}wpw} hfS6CJ hfSCJ hfS5CJjxhfS6UhfSB*phhfSB*phj#xhfS6U hfS0J6jwhfS6U hfS6jhfS6UhfS+hfS5eh fH`q r (hfSeh fH`q r ,>gABDDDGG HFHyHHHIIIKMMvQQSSTTx^  & FZx` & Fx & F 8xxxTVVzXXXZ[]^j^^^_`bxcce=fggh0h & Fx & F & Fx & Fxx  & FZx`x^TVVzXXXX^^^^^^^^^^bbbbbbbbbccc9c:c@dAdhdidjdwdxd=ffXgļıġġ{ġnfhfSB*phjzhfS6UjKzhfS6U hfS0J6jyhfS6U hfS6jhfS6U hfS0Jj+yhfSUjhfSUhfS+hfS5eh fH`q r (hfSeh fH`q r hfS5CJ hfSCJ(XgYggggggekfkkkkkkllllllllllllmmCmDmEmHmImJmKmqmrmmmmmmmٹٹ}ٹl!j}hfSCJOJQJU!jV~hfSCJOJQJUhfS0JCJOJQJ!j/}hfSCJOJQJUjhfSCJOJQJUhfSCJOJQJhfS5OJQJj{hfS6UhfS hfS0J6jm{hfS6U hfS6jhfS6U*0hMhfh|hhh i+ijlllllll $x$Ifa$xx & Fxllll $x$Ifa$pkd|$$IfTl0D8x04 laTllmJm $x$Ifa$pkd}$$IfTl0D8x04 laTJmKmqmm $x$Ifa$pkd~$$IfTl0D8x04 laTmmmpooqsuuvwwxx5x{{{  & FU[<^[xxpkd$$IfTl0D8x04 laTmqqqqqrrrrGrHrIrjrkrvswssssssuuuxxxx yyy9y:yyyk}}}ۆ܆}ujhfSU hfS>*jEhfS6U hfS5+hfS5eh fH`q r (hfSeh fH`q r jԁhfS6Uj?hfS6U hfS0J6jhfS6U hfS6jhfS6UhfS.5xFxRxjx}xxxxyy4||k}}}րZ[]xxx  & FU[<^[܆%&'G_efKLwxy}ÍZ012VW͜ӜΧԧPϺ׍ܚϺxܚjhfS6UhfSB*phjchfS6UjhfS6U+hfS5eh fH`q r (hfSeh fH`q r hfSB*ph hfS6 hfS0J6 hfS0JjhfSUjڂhfSUhfS/GfHŋ}ÍZБ"8Mgŕ & F[ 8x$a$x & Fxŕߕ4PcY{ϛԜC!͠}ˤE & F8x^8 & Fx & Fx & F[ 8xTxէܩ$Aju Y`x & F 8x & Fx & Fx x^` & F8x^8PQ|}~Ưʯ  6:ѿZ[./@X\ITĮĮ٩٩Į١ĮĮ١ق١ٌĮ٩hfSCJOJQJhfSB*phjhfS6UhfSB*ph hfS5+hfS5eh fH`q r (hfSeh fH`q r hfS hfS0J6jhfS6U hfS6jhfS6U3˯(~R)Nl\*P$xa$ & FxxPѿ[-$b/@]vI & F^x & Fx & F2x $ & F2xa$xxTCRQe   "#I] 6ɼεɘεmm+hfS5eh fH`q r (hfSeh fH`q r jنhfS6UhfSB*phhfSB*ph hfS0J6jFhfS6U hfS6jhfS6UhfS0JCJOJQJjhfSUjhfSU hfS5hfS*DRWz_q 63,4x & Fx & Fxx & F^x BCD|}78UVWabd|UVW%&ɸɸzpfhfSCJOJQJhfSCJOJQJjhfSCJUmHnHujhfSU 
hfS0J6jhfS6U hfS6jhfS6U hfS0JjrhfSUjhfSU+hfS5eh fH`q r (hfSeh fH`q r hfS&4Jd xUWy `&'  8x^` & F 8xxG        #*+,019ͷ⯧ͷͷ❘iiYhfScHdhdhdhzR5Hh;SfhfSeh fH`q r hfS0J6jhfS6U hfS6jhfS6UhfSB*phhfSB*ph+hfS5eh fH`q r (hfSeh fH`q r hfS hfS0JjhfSUj hfSU Se/Ib7\@x & F\<x     N 1@hj)hQ <dp & F]xxC$xC$xx9?@A()cd789GHºʰʰʰ}wjjhfSB*Uph hfS@j1hfS6UjhfS6U hfS0J6j!hfS6U hfS6jhfS6UhfSB*phhfSB*phhfShfScHdhdhdh;SfhfScHdhdhdhzR"hfS5cHdhdhdhzR#  pD\`a=>ehi   %"="C"D"$$%&-'T'( ())))))))),,,,,,12J2Ѽ՘Ѽ՘ѼѓѓѓѓѓѓуѼу~ hfS6hfScHdhdhdhzR hfS5hfSB*ph hfS@+hfS5eh fH`q r (hfSeh fH`q r hfShfSB*ph hfS0JjhfSB*UphjƋhfSB*Uph1pDa>i !%"D"]"D$$%.'()))),,,C.002 & F<<xxJ2L22 3333k4l4V5W5w5x5y555<<P=>>>>BBBBBBBBBBBBBDD:FKFiHzH{JJˇzuouu hfSCJ( hfS5jΌhfS6UhfSB*phhfSB*ph hfS0J6jOhfS6UjhfS6U hfSCJhfScHdhdhdhzR+hfS5eh fH`q r (hfSeh fH`q r hfS hfS6+223M3j4l4~445$5N6b6=89u;<P=>>?7@ABBCDFx & F 8xxFiHI{JJJKOLL6MM=NNNOOO4PqPP&QQQRRR~S & F & F & F & FJJJJKKOLfLLL6MAMMM=NGN;SZSS2T[T\TTTTTTTTUUU'U(UaUbUUUUUUXXXXZ-Z[;[[2\\\t]u]]]]jhfSUhfScHdhdhdhzRjahfSUjhfSU hfS0JjchfSUjhfSU hfS6 hfS5hfS+hfS5eh fH`q r 9~S[TT)UUUXXZ[[\\I]]^_J` aaDbDccddff & F & F & F]]]P^Q^^^^^^__h_i_j___`&``ayaab#bbbbbbBcCcccpdqdddddd}e~eeeeeeffӦ(hfSeh fH`q r hfS0J6jhfS6UjhfS6UjҕhfSUjAhfSU hfS6jܒhfSUjhfSUhfSjhfSU hfS0J1ffniiio1o7o8oIoJoooooo[p\pppppp q q0q1q2q3q4q`qaqbqqqqqr r3r4r5ròòÅ{vi{jZhfS6U hfS6jhfS6UhfS0JCJOJQJjhfSUjؙhfSUjhfSU hfS0JjhfSUjhfSU hfS5(hfSeh fH`q r hfS+hfS5eh fH`q r )ffhniiikno9oIoqqer4ttttuuzvOxyzzy{ $ & Fdxa$x & Fxxx5rbrcrzzz{y{{|}}}}7;6LGHIfgWvP{{{vvp hfSCJ hfS>* hfS5hfS0JCJOJQJ!jhfSCJOJQJUjhfSCJOJQJUhfSCJOJQJhfSCJH*OJQJhfSOJQJhfSCJOJQJhfS5CJH*OJQJhfS5CJOJQJhfSjhfS6U hfS0J6,y{|}}7aoЂ܂)\6$xa$ x^`^ hhx^h`h 8x^8` ^` 8^8` & Fdx $ & Fdxa$Cʉ̌ 2iW otњX & F8 8x $ & F8 8x$xx & F 8*x  & Fx^ & FxXmΜPȝ7Uhjnqw?&9 & F_x & Fx  & FU[<^[ x & F8 8x?&9TcapvwΩl`a+ 3QowԹ tu hfS0JjܜhfSB*UphhfSB*CJOJQJphjhfSB*UphhfS5B*ph hfS6 hfSCJhfS5B*ph hfS5hfShfSB*phhfSB*ph;9TcapΩl+ 4Qx & F_xx^չھ˿Pv6HXj\Ii & F{ & Fhx1$ & F`xx & 
F_xx^6HjoNVWjDZ[uDEnop4pw/̵­hfS0JCJjٞhfSCJUjhfSCJU hfSCJ hfS6CJ hfS>*CJ hfS5CJ hfSCJ hfSH* hfS6 hfS5hfSDigxwN|!Wj & Fx^`! x^`x^ & F`xxHDZ!)9  & F^ & F^`  & Fx^ & Fcxx^ & Fx^`9[u5,oS7_/ & F & Fbx & Fx & F x^` & Fx^`  & Fx^/AX.-^z 7Uh ux^ & Fox & Fex & Fx/7h "' 8 &   # 4   !!a"d"""$$T%%%%% ''''++f.{...//ѽѽѽ hfS5H* hfSH*hfS6B*phhfS5B*phhfSB*phhfSCJOJQJ hfS56 hfS5hfSB*ph hfS6hfSD & # 4         = PK & F< 8x^ & F< 8bx^`b & F< x^ & Fx & Foxx^KF$<M7L} & F< 8*x & F< 8x & Fx ^` ^` & F< x^ & F< 8x^} ;f,]1m: & Fl^ $ & Fxa$ & F< 8*x:G :!E!n!o!""#c##$$$T%&'()*|+,-]// 1 & Fp^ & Fl/L0i0 1$111114444o5~555v6666y778888 :5::X;<<<<===>> ?#?.@/@u@@@@@@@@AAA A5AUAoAAAAAAA6B7BڴhfS6CJOJQJhfS5B*CJOJQJphhfSOJQJhfSB*CJOJQJphhfS5CJOJQJhfSCJOJQJhfS hfS5CJ hfSCJB 1$1]2344o55v6y788 ::X;<<<=> ?u@@@!AUA $ & F}xa$ $hx^ha$ $ & F|xa$$xa$UAoAB#CCCIEFIJJ9KK LrLLLMxMMMMKNNNN & Ff"$a$x $ & F~xa$$xa$7BQBSBCgGhGJJJ_QOSPSSTMU[U\]]o^p^__demmuqzqqqctltuu+v8vx'x||bnag#+^_硛jhfSU hfS5 hfSCJ hfS5CJ hfSCJhfS<B*CJOJQJphhfSCJOJ QJ hfS<hfSB*phhfSB*phhfSCJOJQJhfShfS6CJOJQJhfSCJOJQJ8N7PPPQoQQRPSSsTTMU[UWYu[\]p^^__$xa$x & Fz & Fy & Fx & Fw & Fv & Ft & Fr & Fq & Ff__Mabdeeffgwhijsklmmmpuqzqqqsct  & Fx^ & Fx & F & Fxxctltuu+v8vx'xy{||Q}\bn݄5ax & Fx  & Fx^ & Fxa#]ӌ8]l`%ɔqWĖPoyњ & Fx & Fx & Fxx & Fx ڶ۶]g0:"6E      F"S"##+&0&5'F''')*+*+ռռ⡛ hfSCJ hfS5CJhfS<B*phhfS5B*phhfSB*ph hfSCJ hfSPJ hfS5hfSB*ph hfS6hfS hfS0JjhfSUjhfSU=њߠhbJңkv֦jçӧ & F<x & Fx & Fxx & FxE`tʩMaɮگ7K & Fx & F<x & Fx & F<۶wEmڹ (h tx & Fx & Fx & F0dY,1qa"N U7M?d & F & Fxx & FxVIC41U(|M & F & FdEK y    / ; P ~        & F & Fx & Fz & F   [t Z3  & F 8x & Fx & F & F!F"U"""!#h###+&2&5'G''') *+++++ & F 8x & F 8Tx^T & F 8x^` & F 8x & F 8x^*+++f,,,,---..#/q0r000000111111122*3+3,3R3S34444444778882838J9K9l9m9n999ݵݵjhfSUjhfSUj(hfSUjhfSU hfS0Jj|hfSUhfS0JCJOJQJjנhfSUjhfSUhfShfSB*CJph hfS5CJ hfSCJ6+f,,,,--..#/K/T012r47E9C:;N=>@ & Fnx 8x^` & FD 8Lx`L & F 8x & F 
8x^9I:J::::::;;;;;;;V=W======>>>>>>>???)?*?@@?@@@A@U@V@[@\@@ynjbhfSUjŨhfSU.jhfSUcHdhdhdhzRhfScHdhdhdhzR(jhfSUcHdhdhdhzRjߦhfSU hfS0JjhfSUhfS0JCJOJQJj9hfSUjhfSUhfS+@@@@@iBjBBBBBBBBADBDhDiDjDDDHH_j_,`A`aa{bbccWdndܸܨܨܨܞܨܨܙܙܨܔܔܔܔܔܔܔܔ hfS5 hfS6HhzRhfShfScHdhdhdhzRjhfSUhfSB*ph hfS0JjܪhfSUhfShfS0JCJOJQJjhfSUj-hfSU:@XB5DF/FGpIKLLMNPQ:TEU VMWX\L^ & FDC$Eƀ:Rf & F & FnxL^>_,`a|bcXddZee'fffph jknro1p\pf@ffffffggsDuy&y04z"3ΖٖVr ;bg)*89W͆ hfS0J6johfS6UjhfS6U hfS0JjhfSUjhfSUhfSB*ph hfS5hfSCJOJQJ hfS6HhzRhfS5HhzRhfS&hfS5CJ cHdhdhdhzRhfS1^_`Q^AH2BѼڼYZ~ջՎ|q||jhfSUjhfSU hfS6 hfS5hfShfS0JCJ"jhfSB*CJUphjhfSB*CJUphhfSB*CJphjhfSB*UphhfSB*ph hfS0JjhfSB*UphjhfSB*Uph,QA2Ѽud?50M7 & Fxxx & FxWX}~'GHopqyz#$%:;Wb#:f hfS0J6jhfS6UjhfS6UjphfSUjhfSU hfS5 hfS0JjhfSUjhfSUhfS hfS6@}$gjb@ & Fxxxx & FxfgFG-mnMNp8 -./DEye . b !%!!!e"""### %O%R&S&n&L'''(((HhzRhfShfScHdhdhdhzR"hfS5cHdhdhdhzR hfS56hfS56OJQJjhfSCJUmHnHu hfS6hfS hfS5? ;70mFf7)gOh r W    & Fx & Fxxx & FxxnN Ey. !!e"#H & FxEƀzR & Fx & Fjxx# %L'))*[+S,,-W.mffd_____ & F & FxH & FxEƀzR H & FxEƀzR  (((()?)))***Z+[++R,S,d,,,----V.W...///}1~1122233D3A4m455666777|8}8899<:;;a<b<<<?=@======J>K>Y>n>>>>>.?????????@hfS56OJQJhfSHhzRhfS hfS5HhzRhfS5TW../~123A4567}89;<@==K>>?@xABPBB8CC & Fk x^`x & Fkx@@@@wAxAAABBNBOBPBXBBBBB7C8CCCCDD!D2DDDDEEE0EEEFZF[FFFFFGGGGG?H@H{H|H}HHHHHHIIIIII?J\JlJmJJJJJLLNNQQ2QXQUUjhfSU hfSCJjhfSCJUmHnHu hfS5hfShfS56OJQJQCDDEE[FFGG@H}HHIImJJKLNNN-N PQST^ & Fk x^`UBUCUDUOUPUrVsVVVVVV^^^^^ _ _uu+u,u-uTuUu<ržV2ԧss & F x^`()*78ڟ $%JKLSTҡӡԡƦЦ QZ  jhfSUmHnHuhfS6B*phhfSB*ph hfSB*fHphq hfSB*phjhfSCJUmHnHujhfSUjkhfSU hfS6 hfS0JjhfSUjhfSUhfS0O&ѿCc.8ZrtO G & Fx =rs+Gmq,3ps~IQ[\abj*hfSUjhfSUjhfSU hfS0JjhfSUjhfSUhfS5fH`q hfS6hfSB*phhfSB*phjhfSCJUmHnHuhfS hfS56-9$8Ur!X & F & F & Fa{/k     |     # @ V  & F & FAE   7 >     t w {    ^ b   - ] " 2    6 n o  q [& ' 6 6 )7 *7 +7 T7 U7 ;8 W8 += >= *? D? _? ? G G J J =N >N N N ûhfSOJQJ hfS>*jŽhfSU hfS@hfSB*phhfS6B*ph hfS5CJ hfS5jhfSCJUmHnHu hfS6hfSjhfSU hfS0J>V   j  7    Y -    ! 
" 2 s   5 $ & F xa$xx<x5    9   n o    G   p q N! $ & F% xa$ $ & F%xa$ $ & F$xa$ $ & F"xa$$xa$ $ & F!xa$x $ & F xa$N! ! ." P# # /$ r$ $ O% % [& ' ' ( ) ) |* 2+ + #, , - . u. . / 0 41 R2 $ & F%xa$R2 ^3 3 U4 4 5 5 t6 a7 7 :8 ;8 W8 8 z9 9 9 : : ; ; :< < < Y= $ & F'xa$ $ & F&xa$ vx^v` $ & F%xa$Y= = d> > L? ? @ x@ A 1B B mC C RD D SE E F G G G UH H II I ;J zJ J K $ & F'xa$K `L L NM M >N   K [ ۀ b  ‚  ۂ U  { % <<< $ <a$} } } } } } } } } } !~ "~ #~ 6~ t~ u~ ~ ~ ~ ~ ~ ~ ~    < = [ \        ! " I J K [ ـ ڀ 4 5 6 ` jihfSUj`hfSUjKhfSUj"hfSUjhfSUj hfSU hfS56jhfSUhfS hfS0JjhfSUjBhfSU8` a   7 8  ۂ - . / S T s t % & ^ _ ` ބ ߄ , - Y Z [ y z jEhfSUjThfSUj7hfSUjhfSUj-hfSUj$hfSU hfS56jhfSU hfS0JjhfSUhfSjhfSU8 # $ N O ݆ ކ    P Q m n ݇ އ    D E f g Έ ψ    Y Z ˉ ̉ ͉ ) * n jThfSUjhfSUjhfSUjhfSUjhfSUjhfSU hfS56jhfSUhfS hfS0JjhfSU9 R F [ Q M E<EƀzR< n o p Ԋ Պ    O P z { 1 2 3 K L M V h i u ››|j#HhzRhfS0JCJOJQJ!j7hfSCJOJQJUjhfSCJOJQJUHhzRhfSCJOJQJhfS0JCJOJQJjhfSUhfSCJOJQJj[hfSUjhfSUhfS hfS0JjhfSUjhfSU( Ȍ Ɍ  ď ŏ ݏ ޏ #$a$ #$^`a$$a$CEƀzR   7 7 7 7 8 8 8 @ @ @ @ @ @ A B V V v v v v v v v %w 'w (w )w *w +w 6w aw { { { ,| zhfS5>*CJOJQJhfSCJOJQJjhfS5UjGhfS5Uj hfSU hfS>* hfS5 hfS5CJ hfSCJ hfSCJhfS56>*hfS56CJhfS>*OJQJhfS6OJQJhfSOJQJhfS hfS5>*, v w & ' ϔ Д | }   L M 6 7 ™ Ù ٚ ښ S #$a$ #$^`a$S T } ~ L M ݞ ޞ _ ` ţ ƣ #$a$ #$^`a$ #$^`a$ b c ! " t u ֬ ׬   & ' #$^`a$ #$^`a$#$a$' @ A K L   S T Z [ ) * m n  ! #$^`a$#$a$ #$^`a$     ; ξ I J P Q * + ! A B X #$^`a$#$a$ #$^`a$ D E ! " G H     p q #$a$ #$^`a$ #$^`a$ #$^`a$ T U   C D 7 8    ! } ~ #$^`a$ #$^`a$#$a$ #$^`a$ \ ] H I % & b c   k l E F A #$^`a$#$a$A B a b v w 7 8   E F ~  #$^`a$#$a$ 2 3  ! " G H   Z #$^`a$#$a$ #$^`a$Z [ i j   ` a  S T #$^`a$ #$@ ^@ `a$ #$^`a$#$a$ E F # $ ( ) 4 5 6 M N       #$^`a$ #$^`a$#$a$     a b  @ A w x   6 7 8 b c    #$^`a$#$a$ #$^`a$    * +               . /   " # n #$^`a$ #$^`a$#$a$n o   R S    g h  r s     % & ! ! e! f! 
1" 2" " #$@ ^@ `a$ #$^`a$#$a$" " # # {$ |$ |% }% % % J& K& & & & & u' v' w' ) ) ) ) S* T* * * #$^`a$ #$^`a$#$a$* 7+ 8+ 8, 9, , , E. F. -/ ./ j/ k/ 0 0 1 1 1 1 2 2 2 2 3 3 #^` #^`##$a$ #$^`a$3 3 3 4 4 )5 *5 5 5 6 6 7 7 7 7 8 8 8 B8 _8 8  v < v < v  v #^` #^`# #@ ^@ `8 8 8 8 9 19 `9 9 9 9 9 9 +: X: ~: : : : : #; [; ; ; 8< V<  v  X<^ `X v <V< v< < < < < = L= d= = = = = > -> T> o> > > > (? R? w? ? ? ? @ ?@  v <?@ o@ @ @ @ @ @ A D E E SF H /I YI IK L DN O Q R T SW JY c[ ]  d v <] 7] _ w` rd f g g g g g g g g 1k n p s t t v v v v v v v $a$ $$$a$$a$v v v v v v v v %w &w 'w )w +w bw rx Bz z { *| w} ~  B  . $xa$x$a$$a$,| 4| y} } ~ ~ B D P      x y ͕ @ A e f z { | } ռ洬拃~~~vij= hfSCJUVjhfSU hfS5jWhfSUjH= hfSCJUVj<hfSUhfS<B*CJphhfSB*phhfSB*ph hfS0J6jLWhfS6U hfS6jhfS6U hfS5>*hfShfSCJOJQJhfS5CJOJQJ&. ΅ o ? {  * T Ԏ ~  O ͕  & Fx & Fx & Fxx  ) 0 @ A B C Z e ~ k ՙ 0 e  , a Ο Ƣ   & Fx$a$$a$xx & Fx t "  ԩ Z - Q ά 2 M   ²   $a$$a$ & Fxx  л ݻ , =  n r i j f o H Q  U V  ' ` x ;    ] ( 2 Ʋܫ hfS5CJ hfS0J6j'hfS6UjhfS6UhfS<B*CJph hfS5 hfS< hfS6hfSjhfSUj|hfSUC s O  ,  F f  & Fx & Fxxf o  H Q  ' + B U V W t U  |  `  & F & Fx & Fxx ; < $Iflkd$$Ifl0!*N 04 la $$Ifa$  $ $IflkdW$$Ifl0!*N 04 la$ % $Iflkd&$$Ifl0!*N 04 la  $ $Iflkd$$Ifl0!*N 04 la$ % 5 D $Iflkd$$Ifl0!*N 04 laD E f  p   1 s  & Flkdi$$Ifl0!*N 04 la +     0 J X ] $$Ifa$$a$ & F] ^ _ ` a b c F===== $$Ifa$kd8$$Iflֈ _!$>;R 04 lac d e f g h i =kd"$$Iflֈ _!$>;R 04 la $$Ifa$i j k l m n o =kd $$Iflֈ _!$>;R 04 la $$Ifa$o p q r s t u =kd$$Iflֈ _!$>;R 04 la $$Ifa$u v w x y z { =kd$$Iflֈ _!$>;R 04 la $$Ifa${ | } ~  =kdʫ$$Iflֈ _!$>;R 04 la $$Ifa$ $$Ifa$ F===== $$Ifa$kd$$Iflֈ _!$>;R 04 la =kd$$Iflֈ _!$>;R 04 la $$Ifa$ =kd$$Iflֈ _!$>;R 04 la $$Ifa$ =kdr$$Iflֈ _!$>;R 04 la $$Ifa$ =kd\$$Iflֈ _!$>;R 04 la $$Ifa$ =kdF$$Iflֈ _!$>;R 04 la $$Ifa$ $$Ifa$ F===== $$Ifa$kd0$$Iflֈ _!$>;R 04 la =kd$$Iflֈ _!$>;R 04 la $$Ifa$ ( =83$a$$a$kd$$Iflֈ _!$>;R 04 la $$Ifa$( 3 D L U b o } $$Ifa$$a$ 2 3   0 8 x  0 8 9 E w j |  8 [ b q x F G L T } t |     `  ! 
o p   B C S T oC C C C C C hfS56hfSCJOJ QJ hfSCJOJQJ hfSCJj3hfSUj3hfSCJU hfS>* hfS6 hfSCJ hfS5CJ hfS5 hfS5CJhfSA  $$Ifa$kd$$IfTHִEV%,)HW[=0    4 HaT $$Ifa$  $$Ifa$kd$$IfTHִEV%,)HW[=0    4 HaT $$Ifa$  $$Ifa$kd4$$IfTHִEV%,)HW[=0    4 HaT $$Ifa$  $$Ifa$kdW$$IfTHִEV%,)HW[=0    4 HaT $$Ifa$  $$Ifa$kdz$$IfTHִEV%,)HW[=0    4 HaT $$Ifa$  $$Ifa$kd$$IfTHִEV%,)HW[=0    4 HaT $$Ifa$  $$Ifa$kd$$IfTHִEV%,)HW[=0    4 HaT $$Ifa$  $$Ifa$kd$$IfTHִEV%,)HW[=0    4 HaT $$Ifa$  $$Ifa$kd$$IfTHִEV%,)HW[=0    4 HaT $$Ifa$  $$Ifa$kd)$$IfTHִEV%,)HW[=0    4 HaT $$Ifa$ $a$kdL$$IfTHִEV%,)HW[=0    4 HaT $$Ifa$$a$ F===== $$Ifa$kdo$$Iflֈ` 2!$Zr404 la =kd$$Iflֈ` 2!$Zr404 la $$Ifa$ =kd$$Iflֈ` 2!$Zr404 la $$Ifa$     =kd$$Iflֈ` 2!$Zr404 la $$Ifa$     =kdk$$Iflֈ` 2!$Zr404 la $$Ifa$    =kdU$$Iflֈ` 2!$Zr404 la $$Ifa$       $$Ifa$       F===== $$Ifa$kd?$$Iflֈ` 2!$Zr404 la    ! " =kd)$$Iflֈ` 2!$Zr404 la $$Ifa$" # $ % & ' ( =kd$$Iflֈ` 2!$Zr404 la $$Ifa$( ) * + , - . =kd$$Iflֈ` 2!$Zr404 la $$Ifa$. / 0 1 2 3 4 =8$a$kd$$Iflֈ` 2!$Zr404 la $$Ifa$4 j } $$Ifa$$a$ F===== $$Ifa$kd$$Iflֈ` 2!$Zr404 la =kd$$Iflֈ` 2!$Zr404 la $$Ifa$ =kd$$Iflֈ` 2!$Zr404 la $$Ifa$ =kd$$Iflֈ` 2!$Zr404 la $$Ifa$ =kd$$Iflֈ` 2!$Zr404 la $$Ifa$ =kd$$Iflֈ` 2!$Zr404 la $$Ifa$ $$Ifa$ F===== $$Ifa$kd$$Iflֈ` 2!$Zr404 la =kd$$Iflֈ` 2!$Zr404 la $$Ifa$      =kdu$$Iflֈ` 2!$Zr404 la $$Ifa$    =kd_$$Iflֈ` 2!$Zr404 la $$Ifa$     =8$a$kdI$$Iflֈ` 2!$Zr404 la $$Ifa$  9 _ N W m   F H y p  0   N c      x$xa$$a$x  <    `    2   ! D o  & F4x$Ifx & F 8xxo p    & F4x$IflkdO\$$Ifl0H$04 la  B  & F4x$Iflkd\$$Ifl0H$04 laB C R S ~x$If  & F4x$IflkdW]$$Ifl0H$04 laS T U ! 
x$ % & , ), / 2 4 r8 8 Q: > xlkd]$$Ifl0H$04 la> @ 6C 7C BC oC C C C D _U <<$Ifkd_^$$Ifl40$V 04 lap $<<$Ifa$xx D D D gE hE lkd)_$$Ifl0$V04 la <<$IfC D E iE E E F F F G G 7H =H fH H I I I J J J AK zK L HL L M M M N N O O YP P P Q xR R R R S S S S S @S pS ?T xT +U eU V V V 8V sW X X KX LX X }Y Y Y Z Z Z 7[ hfSOJQJhfS5B*phhfS5B*CJphhfSOJQJmH nH u hfS0JjhhfSUjhfSU hfS6 hfS5hfSDhE iE E E <<$Iflkd_$$Ifl0$V04 laE E F F <<$IflkdM`$$Ifl0$V04 laF F rG sG <<$Iflkd`$$Ifl0$V04 lasG tG dH eH <<$Iflkdqa$$Ifl0$V04 laeH fH I I <<$Iflkdb$$Ifl0$V04 laI I J J <<$Ifokdb$$Ifl40$V04 laf4J J ?K @K <<$Iflkd-c$$Ifl0$V04 la@K AK L L <<$Iflkdc$$Ifl0$V04 laL L L L <<$IflkdQd$$Ifl0$V04 laL L M M <<$Iflkdd$$Ifl0$V04 laM M N N <<$Iflkdue$$Ifl0$V04 laN N O O <<$Iflkdf$$Ifl0$V04 laO O WP XP <<$Ifokdf$$Ifl40$V04 laf4XP YP P P <<$Iflkd1g$$Ifl0$V04 laP P vR wR <<$Iflkdg$$Ifl0$V04 lawR xR >S ?S <<$IflkdUh$$Ifl0$V04 la?S @S =T >T <<$Iflkdti$$Ifl0$V04 la>T ?T )U *U <<$Iflkdj$$Ifl0$V04 la*U +U V V <<$Ifokdj$$Ifl40$V04 laf4 V V pW qW <<$Ifokd0k$$Ifl40$V04 laf4qW rW sW X X X $X 3X { ?{ @{ |tttx$Ifkd$$Ifl  F$$P t0      4 la@{ A{ { { { |tttx$Ifkd4$$Ifl  F$$P t0      4 la{ { | | | |tttx$Ifkdԓ$$Ifl  F$$P t0      4 la | | | | | |tttx$Ifkdt$$Ifl  F$$P t0      4 la| | h} i} j} |tttx$Ifkd$$Ifl  F$$P t0      4 laj} k} } } } |tttx$Ifkd$$Ifl  F$$P t0      4 la} } B~ C~ D~ |tttx$IfkdT$$Ifl  F$$P t0      4 laD~ E~ ~ ~ ~ |tttx$Ifkd$$Ifl  F$$P t0      4 la~ ~ ~ ~ ~ |tttx$Ifkd$$Ifl  F$$P t0      4 la~ ~    |tttx$Ifkd4$$Ifl  F$$P t0      4 la  zrrrx$IfkdԘ$$Ifl  DF$$P t0      4 la |tttx$Ifkdx$$Ifl  F$$P t0      4 la M N O |tttx$Ifkd$$Ifl  F$$P t0      4 laO P |tttx$Ifkd$$Ifl  F$$P t0      4 la 8 9 : |tttx$IfkdX$$Ifl  F$$P t0      4 la: ; ̃ ̓ ΃ |tttx$Ifkd$$Ifl  F$$P t0      4 la΃ σ Ѓ у \ ] Ƅ [ ͅ g |xvvtttjjjjj $ & Fxa$xkd$$Ifl  F$$P t0      4 la   $ P  ى Ԋ Պ  b K $ ? 
$ & Fxa$x $ & Fxa$   ى   8 9 : ] ^ Ԋ   $ ( ƥ ʥ Υ ӥ ٧ ڧ 0 1 ֪ ڪ p w } + 6 h ƼƯҢҢҢҒҒ}ҢҢҢҢҢҢj hfSUHhRfhfShfScHdhdhdhRf hfS6hfS0JCJjhfSCJUjhfSCJU hfSCJ hfSCJhfShfSB*phhfSB*ph hfS0JjhfSUjhfSU1? Δ c o = 0 p a Ϡ > t 5 ˤ ̤ h $ & Fxa$ $ & Fxa$$8hx^8`ha$ s զ v w x 3 Q R h ة Ǫ l ]  , ȯ j D x $ & Fxa$h s c f ո ظ I J { | } T Y ^ c  ޿  A n o    0 6  " % ϾhfS0JCJjhfSCJUjhfSCJUhfSCJOJQJ hfSCJ hfS0JjhfSUjhfSU hfS5HhRfhfShfScHdhdhdhRfhfS hfS68D 5 յ ^   ] ƻ   P   $ & Fxa$x $ & Fxa$ ݿ ޿ P  4 G { $  9 >    "$ & Fa$  x P h p  k  _ % x  f ; "$ & Fa$ c ! N ] J   A d  x   J $ & Fxa$ $ & Fxa$x"$ & Fa$J ! v 8   x ~ g ' 0 ? $x$Ifa$ $ & Fxa$% ( ' ? @ ` a             R S G H       B   # $ 2 O P x / 0 @  -  :       ~       [ ^ _ v     .hfS5CJOJQJcHdhdhdhzRhfSCJOJQJhfS5CJOJQJhfS hfS6O? @ ` x$Iflkd$$Ifl0D % <04 la ` x$Iflkd£$$Ifl0D % <04 la` a j  x$Iflkdb$$Ifl0D % <04 la    x$Iflkd$$Ifl0D % <04 la  %  x$Iflkd$$Ifl0D % <04 la    x$Iflkd$$Ifl0D % <04 la    x$Iflkd$$Ifl0D % <04 la  *  x$Iflkd<$$Ifl0D % <04 la   R x$IflkdΧ$$Ifl0D % <04 laR S e G x$Iflkd`$$Ifl0D % <04 laG H _  x$Iflkd$$Ifl0D % <04 la    x$Iflkd$$Ifl0D % <04 la    x$Iflkd$$Ifl0D % <04 la  x$Iflkd$$Ifl0D % <04 la  B x$Iflkd:$$Ifl0D % <04 laB C D o  <<$Iflkd̫$$Ifl0D % <04 la  # XK $<<$Ifa$kd^$$Ifl4F6$ ```0    4 lap# $ M N O $If <<$If\kdb$$Ifl4$h%04 laf4O P |rll$If <<$Ifkd$$Ifl4F6$0    4 laf4 - . 
/ |rll$If <<$Ifkd$$Ifl4F6$0    4 laf4/ 0 |rll$If <<$Ifkdj$$Ifl4F6$0    4 laf4    |rll$If <<$Ifkd$$Ifl4F6$0    4 laf4 |rll$If <<$Ifkdư$$Ifl4F6$0    4 laf4    |rll$If <<$Ifkdt$$Ifl4F6$0    4 laf4     |rll$If <<$Ifkd"$$Ifl4F6$0    4 laf4     |rll$If <<$Ifkdв$$Ifl4F6$0    4 laf4  \ ] ^ |rll$If <<$Ifkd~$$Ifl4F6$0    4 laf4^ _    |tnn$Ifx$Ifkd,$$Ifl4F6$0    4 laf4     |rll$If <<$Ifkdڴ$$Ifl4F6$0    4 laf4               A B C E F r s P         @ A k      :    -   , 6 7 9 : O                θ긨HhzRhfSCJOJQJ+hfSCJOJQJcHdhdhdhzR.hfS5CJOJQJcHdhdhdhzRhfShfSCJOJQJhfS5CJOJQJB     |rll$If <<$Ifkd$$Ifl4F6$0    4 laf4     |rll$If <<$Ifkd6$$Ifl4F6$0    4 laf4   |o $<<$Ifa$kd$$Ifl4F6$0    4 laf4     $If <<$If\kd$$Ifl4$h%04 laf4  C D E |rll$If <<$Ifkd$$Ifl4F6$0    4 laf4E F    |rll$If <<$Ifkd¸$$Ifl4F6$0    4 laf4     |rll$If <<$Ifkdp$$Ifl4F6$0    4 laf4  > ? @ |rll$If <<$Ifkd$$Ifl4F6$0    4 laf4@ A    |rll$If <<$Ifkd̺$$Ifl4F6$0    4 laf4     |rll$If <<$Ifkdz$$Ifl4F6$0    4 laf4     |rll$If <<$Ifkd($$Ifl4F6$0    4 laf4  7 8 9 |rll$If <<$Ifkdּ$$Ifl4F6$0    4 laf49 :    |rll$If <<$Ifkd$$Ifl4F6$0    4 laf4     |rll$If <<$Ifkd2$$Ifl4F6$0    4 laf4     |rll$If <<$Ifkd$$Ifl4F6$0    4 laf4     |rll$If <<$Ifkd$$Ifl4F6$0    4 laf4     ? @ O _         $ % 5 Q   O S T \      e f h i v       % & ( ) A !  ? |rll$If <<$Ifkd<$$Ifl4F6$0    4 laf4? @    |rll$If <<$Ifkd$$Ifl4F6$0    4 laf4  " # $ |rll$If <<$Ifkd$$Ifl4F6$0    4 laf4$ %    |rll$If <<$IfkdF$$Ifl4F6$0    4 laf4  Q R S |rll$If <<$Ifkd$$Ifl4F6$0    4 laf4S T    |rll$If <<$Ifkd$$Ifl4F6$0    4 laf4  f g h |tnn$Ifx$IfkdP$$Ifl4F6$0    4 laf4h i    |tnn$Ifx$Ifkd$$Ifl4F6$0    4 laf4  & ' ( |tnn$Ifx$Ifkd$$Ifl4F6$0    4 laf4( ) |tnn$Ifx$IfkdZ$$Ifl4F6$0    4 laf4 |tnn$Ifx$Ifkd$$Ifl4F6$0    4 laf4 =! >! ?! |tnn$Ifx$Ifkd$$Ifl4F6$0    4 laf4?! @! 
4" 5" 6" |tnn$Ifx$Ifkdd$$Ifl4F6$0    4 laf46" 7" t" u" v" |tnn$Ifx$Ifkd$$Ifl4F6$0    4 laf4v" w" " " " |tnn$Ifx$Ifkd$$Ifl4F6$0    4 laf4" " # # # |tnn$Ifx$Ifkdn$$Ifl4F6$0    4 laf4# # # # # |tnn$Ifx$Ifkd$$Ifl4F6$0    4 laf4# # p$ q$ r$ |rll$If <<$Ifkd$$Ifl4F6$0    4 laf4p$ r$ s$ $ c% d% f% g% % % % % % & & & & ' ' ,' ' ' ' ' ' ( ( ( ( ( ( l) m) o) p) ) * * "* #* j* l* m* * ^+ _+ a+ b+ + + + + + + + + ,, -, V, , , , <- =- U- - - - - - - ,. A. B. K. `. z. {. . hfS56hfS56CJ hfS5hfS56OJQJhfS5CJOJQJhfShfSCJOJQJNr$ s$ d% e% f% |rll$If <<$Ifkdx$$Ifl4F6$0    4 laf4f% g% % % % |rll$If <<$Ifkd&$$Ifl4F6$0    4 laf4% % }& ~& & |rll$If <<$Ifkd$$Ifl4F6$0    4 laf4& & ' ' ' |rll$If <<$Ifkd$$Ifl4F6$0    4 laf4' ' ' ' ' ' ' ' |rrrrll$If <<$Ifkd0$$Ifl4F6$0    4 laf4' ' ' |o $<<$Ifa$kd$$Ifl4F6$0    4 laf4' ' ~( ( ( $If <<$If\kd$$Ifl4$h%04 laf4( ( ( ( ( |rll$If <<$Ifkd$$Ifl4F6$0    4 laf4( ( m) n) o) |rll$If <<$Ifkd$$Ifl4F6$0    4 laf4o) p) * !* "* |rll$If <<$Ifkdj$$Ifl4F6$0    4 laf4"* #* j* k* l* |rll$If <<$Ifkd$$Ifl4F6$0    4 laf4l* m* _+ `+ a+ |rll$If <<$Ifkd$$Ifl4F6$0    4 laf4a+ b+ + + + |rll$If <<$Ifkdt$$Ifl4F6$0    4 laf4+ + + |o $<<$Ifa$kd"$$Ifl4F6$0    4 laf4+ + *, +, ,, $If <<$If\kd$$Ifl4$h%04 laf4,, -, , , , |rll$If <<$IfkdR$$Ifl4F6$0    4 laf4, , :- ;- <- |rll$If <<$Ifkd$$Ifl4F6$0    4 laf4<- =- - - - |rll$If <<$Ifkd$$Ifl4F6$0    4 laf4- - - - - |rll$If <<$Ifkd\$$Ifl4F6$0    4 laf4- - - - ". #. ,. A. |trprjj$Ifwx^wkd $$Ifl4F6$0    4 laf4A. B. a. z. $Ifokd$$Ifl40T$04 laf4z. {. . . . $Ifokd$$Ifl40T$04 laf4. . . . . . ZKK>> $$x$Ifa$ @$IfkdX$$IflF T$           0    4 lap. . . . . / / "/ B/ R/ U/ w/ / / / / / / / / 0 0 #0 H0 V0 Y0 0 0 0 0 0 0 0 1 1 1 :1 T1 U1 1 1 1 1 1 1 2 "2 #2 &2 72 D2 E2 G2 H2 d2 r2 u2 2 hfSOJQJ hfS5CJ hfS6CJOJQJmH nH uhfSCJOJQJmH nH u hfS56 hfS5 hfS6hfShfS6CJmH nH uhfSCJmH nH uhfS56CJ9. . 
/ / / !/ rree $$x$Ifa$ @$Ifkdg$$IflF T$   0    4 la!/ "/ B/ R/ S/ T/ rree $$x$Ifa$ @$Ifkd$$IflF T$ 0    4 laT/ U/ w/ / / / rree $$x$Ifa$ @$Ifkd$$IflF T$ 0    4 la/ / / / / / rree $$x$Ifa$ @$Ifkdm$$IflF T$ 0    4 la/ / / / / / rree $$x$Ifa$ @$Ifkd$$IflF T$ 0    4 la/ / 0 0 !0 "0 rree $$x$Ifa$ @$Ifkd$$IflF T$ 0    4 la"0 #0 H0 V0 W0 X0 rree $$x$Ifa$ @$Ifkde$$IflF T$ 0    4 laX0 Y0 0 0 0 0 rree $$x$Ifa$ @$Ifkd $$IflF T$ 0    4 la0 0 0 0 0 0 rree $$x$Ifa$ @$Ifkd$$IflF T$ 0    4 la0 0 0 0 0 1 1 wusmm$If$$xa$kd]$$IflF T$ 0    4 la1 1 ;1 T1 $Ifokd$$Ifl40T$04 laf4T1 U1 _1 h1 1 $Ifokd$$Ifl40T$04 laf41 1 1 1 1 1 ZKK>> $$x$Ifa$ @$Ifkd$$IflF T$           0    4 lap1 1 2 #2 $2 %2 ppcc $$x$Ifa$ @$Ifkd$$IflF T$   0    4 la%2 &2 72 E2 F2 G2 ppcc $$x$Ifa$ @$Ifkdx$$IflF T$ 0    4 laG2 H2 d2 r2 s2 t2 ppcc $$x$Ifa$ @$Ifkd.$$IflF T$ 0    4 lat2 u2 2 2 2 2 rree $$x$Ifa$ @$Ifkd$$IflF T$ 0    4 la2 2 2 2 2 2 2 2 2 3 %3 (3 Q3 _3 b3 3 3 3 3 3 3 3 4 4 4 :4 X4 Y4 b4 w4 4 4 4 4 4 4 4 4 4 5 5 5 !5 /5 15 25 L5 Z5 \5 ]5 x5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 6 '6 )6 *6 M6 [6 ]6 ^6 6 6 6 6 6 6 6 hfS56 hfS5 hfS6hfShfSOJQJhfS56CJhfS6OJQJR2 2 2 2 2 2 rree $$x$Ifa$ @$Ifkd$$IflF T$ 0    4 la2 2 2 2 2 2 rree $$x$Ifa$ @$IfkdB$$IflF T$ 0    4 la2 2 3 %3 &3 '3 rree $$x$Ifa$ @$Ifkd$$IflF T$ 0    4 la'3 (3 Q3 _3 `3 a3 rree $$x$Ifa$ @$Ifkd$$IflF T$ 0    4 laa3 b3 3 3 3 3 rree $$x$Ifa$ @$Ifkd:$$IflF T$ 0    4 la3 3 3 3 3 3 rree $$x$Ifa$ @$Ifkd$$IflF T$ 0    4 la3 3 3 4 4 4 rree $$x$Ifa$ @$Ifkd$$IflF T$ 0    4 la4 4 4 94 :4 C4 X4 }{}uu$Ifkd2$$IflF T$ 0    4 laX4 Y4 x4 4 $Ifokd$$Ifl40T$04 laf44 4 4 4 4 $Ifokd$$Ifl40T$04 laf44 4 4 4 4 4 ZTTGG $$x$Ifa$$Ifkdz$$IflF T$           0    4 lap4 4 4 5 5 5 yyll $$x$Ifa$$Ifkd$$IflF T$   0    4 la 5 5 !5 /5 05 15 rree $$x$Ifa$ `@ $Ifkd?$$IflF T$ 0    4 la15 25 L5 Z5 [5 \5 rree $$x$Ifa$ `@ $Ifkd$$IflF T$ 0    4 la\5 ]5 x5 5 5 5 rree $$x$Ifa$ `@ $Ifkd$$IflF T$ 0    4 la5 5 5 5 5 5 rree $$x$Ifa$ `@ $Ifkd7$$IflF T$ 0    4 la5 5 5 5 5 5 rree $$x$Ifa$ `@ $Ifkd$$IflF T$ 0    4 la5 5 
5 5 5 5 rree $$x$Ifa$ `@ $Ifkd$$IflF T$ 0    4 la5 5 6 '6 (6 )6 rree $$x$Ifa$ `@ $Ifkd/$$IflF T$ 0    4 la)6 *6 M6 [6 \6 ]6 rree $$x$Ifa$ `@ $Ifkd$$IflF T$ 0    4 la]6 ^6 6 6 6 6 rree $$x$Ifa$ `@ $Ifkd$$IflF T$ 0    4 la6 6 6 6 6 6 rree $$x$Ifa$ `@ $Ifkd'$$IflF T$ 0    4 la6 6 6 6 7 7 7 ;7 V7 X7 Y7 7 7 7 7 7 7 7 7 7 7 7 8 18 28 f8 g8 y8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 9 9 9 /9 =9 @9 a9 o9 r9 9 9 9 9 9 9 : : : &: 4: 7: J: X: Z: [: : : : : : : : hfS5CJhfSOJQJhfS6OJQJhfSOJQJ hfS56 hfS6hfS hfS5hfS56CJL6 6 6 7 7 7 rree $$x$Ifa$ `@ $Ifkd$$IflF T$ 0    4 la7 7 ;7 V7 W7 X7 rree $$x$Ifa$ @$Ifkdw$$IflF T$ 0    4 laX7 Y7 7 7 7 7 rree $$x$Ifa$ `@ $Ifkd$$IflF T$ 0    4 la7 7 7 7 7 7 7 wuuoo$If$$xa$kd$$IflF T$ 0    4 la7 7 8 18 $Ifokdo$$Ifl40T$04 laf418 28 <8 E8 f8 $Ifokd?$$Ifl40T$04 laf4f8 g8 y8 8 8 8 ZMM@@ $$x$Ifa$  $Ifkd$$IflF T$           0    4 lap8 8 8 8 8 8 rree $$x$Ifa$  $Ifkd$$IflF T$   0    4 la8 8 8 8 8 8 rree $$x$Ifa$  $Ifkd$$IflF T$ 0    4 la8 8 8 8 8 8 rree $$x$Ifa$  $Ifkd|$$IflF T$ 0    4 la8 8 9 9 9 9 rree $$x$Ifa$  $Ifkd$$$IflF T$ 0    4 la9 9 /9 =9 >9 ?9 rree $$x$Ifa$  $Ifkd$$IflF T$ 0    4 la?9 @9 a9 o9 p9 q9 rree $$x$Ifa$  $Ifkdt $$IflF T$ 0    4 laq9 r9 9 9 9 9 rree $$x$Ifa$  $Ifkd $$IflF T$ 0    4 la9 9 9 9 9 9 rree $$x$Ifa$  $Ifkd $$IflF T$ 0    4 la9 9 : : : : rree $$x$Ifa$  $Ifkdl $$IflF T$ 0    4 la: : &: 4: 5: 6: rree $$x$Ifa$  $Ifkd $$IflF T$ 0    4 la6: 7: J: X: Y: Z: rree $$x$Ifa$  $Ifkd $$IflF T$ 0    4 laZ: [: : : : : rree $$x$Ifa$  $Ifkdd $$IflF T$ 0    4 la: : : : : : rree $$x$Ifa$  $Ifkd  $$IflF T$ 0    4 la: : : : ; ; rree $$x$Ifa$  $Ifkd $$IflF T$ 0    4 la: : : : ; ; ; ); ,; J; K; T; j; ; ; ; ; ; ; ; ; ; < < !< /< 2< J< W< X< [< r< < < < < < < < < < < = = $= 2= 5= ]= k= n= o= = = = = @> Z> [> > > > > > > > > ? ? /? 2? P? S? u? 
濸濸 hfS6@ hfS@ hfS56 hfS5hfSOJQJhfSOJQJmH nH uhfS56CJ hfS6hfS hfS5CJH; ; ; +; ,; 5; J; trf``$If  x   xkd\ $$IflF T$ 0    4 laJ; K; k; ; $Ifokd $$Ifl40T$04 laf4; ; ; ; ; $Ifokd $$Ifl40T$04 laf4; ; ; ; ; ; ZKK>> $$x$Ifa$  $Ifkd $$IflF T$           0    4 lap; ; ; < < < ppcc $$x$Ifa$  $Ifkd $$IflF T$   0    4 la< < !< /< 0< 1< ppcc $$x$Ifa$  $Ifkdi $$IflF T$ 0    4 la1< 2< J< X< Y< Z< ppcc $$x$Ifa$  $Ifkd $$IflF T$ 0    4 laZ< [< r< < < < ppcc $$x$Ifa$  $Ifkd $$IflF T$ 0    4 la< < < < < < ppcc $$x$Ifa$  $Ifkda $$IflF T$ 0    4 la< < < < < < ppcc $$x$Ifa$  $Ifkd $$IflF T$ 0    4 la< < < = = = ppcc $$x$Ifa$  $Ifkd $$IflF T$ 0    4 la = = $= 2= 3= 4= ppcc $$x$Ifa$  $IfkdY $$IflF T$ 0    4 la4= 5= ]= k= l= m= ppcc $$x$Ifa$  $Ifkd $$IflF T$ 0    4 lam= n= o= = = = sqkk$If  xkd $$IflF T$ 0    4 la= = A> Z> $IfokdQ $$Ifl40T$04 laf4Z> [> e> n> > $Ifokd! $$Ifl40T$04 laf4> > > > > ZM@@ $$x$Ifa$ ` p$Ifkd $$IflF T$           0    4 lap> > > > > ree $$x$Ifa$ ` p$Ifkd $$IflF T$   0    4 la> > > > > ree $$x$Ifa$ ` p$Ifkd $$IflF T$ 0    4 la> > ? ? ? ree $$x$Ifa$ ` p$Ifkd^ $$IflF T$ 0    4 la? ? /? 0? 1? ree $$x$Ifa$ ` p$Ifkd $$IflF T$ 0    4 la1? 2? P? Q? R? ree $$x$Ifa$ ` p$Ifkd $$IflF T$ 0    4 laR? S? u? v? w? ree $$x$Ifa$ ` p$IfkdV $$IflF T$ 0    4 law? x? ? ? ? ree $$x$Ifa$ ` p$Ifkd $$IflF T$ 0    4 lau? x? ? ? ? ? ? ? @ @ @ /@ 2@ S@ U@ V@ v@ y@ @ @ @ @ @ @ @ @ A A 5A 7A 8A ZA ]A ^A A A A A A A A A B B 4B 7B DB GB VB YB nB qB B B B B B B B B B B C C C !C 6C 9C MC PC QC RC SC nD oD pD rD hfS0J%h7GnjhfS0J)U hfS56 hfS5hfSOJQJ hfS5CJhfShfS56CJL? ? ? ? ? ree $$x$Ifa$ ` p$Ifkd $$IflF T$ 0    4 la? ? ? ? ? ree $$x$Ifa$ ` p$IfkdN $$IflF T$ 0    4 la? ? 
@ @ @ ree $$x$Ifa$ ` p$Ifkd $$IflF T$ 0    4 la @ @ /@ 0@ 1@ ree $$x$Ifa$ ` p$Ifkd $$IflF T$ 0    4 la1@ 2@ S@ T@ U@ ree $$x$Ifa$ ` p$IfkdF $$IflF T$ 0    4 laU@ V@ v@ w@ x@ ree $$x$Ifa$ ` p$Ifkd $$IflF T$ 0    4 lax@ y@ @ @ @ ree $$x$Ifa$ ` p$Ifkd $$IflF T$ 0    4 la@ @ @ @ @ ree $$x$Ifa$ ` p$Ifkd> $$IflF T$ 0    4 la@ @ @ @ @ ree $$x$Ifa$ ` p$Ifkd $$IflF T$ 0    4 la@ @ A A A ree $$x$Ifa$ ` p$Ifkd $$IflF T$ 0    4 laA A 5A 6A 7A ree $$x$Ifa$ ` p$Ifkd6 $$IflF T$ 0    4 la7A 8A ZA [A \A ree $$x$Ifa$ ` p$Ifkd $$IflF T$ 0    4 la\A ]A ^A A A A A sqskk$If  xkd $$IflF T$ 0    4 laA A A A $Ifokd. $$Ifl40T$04 laf4A A A A B $Ifokd $$Ifl40T$04 laf4B B 4B 5B 6B ZL?? $$x$Ifa$ p x$Ifkd! $$IflF T$           0    4 lap6B 7B DB EB FB qdd $$x$Ifa$ p x$Ifkd" $$IflF T$   0    4 laFB GB VB WB XB qdd $$x$Ifa$ p x$Ifkd# $$IflF T$ 0    4 laXB YB nB oB pB qdd $$x$Ifa$ p x$Ifkd;$ $$IflF T$ 0    4 lapB qB B B B qdd $$x$Ifa$ p x$Ifkd$ $$IflF T$ 0    4 laB B B B B qdd $$x$Ifa$ p x$Ifkd% $$IflF T$ 0    4 laB B B B B qdd $$x$Ifa$ p x$Ifkd3& $$IflF T$ 0    4 laB B B B B qdd $$x$Ifa$ p x$Ifkd& $$IflF T$ 0    4 laB B B B B qdd $$x$Ifa$ p x$Ifkd' $$IflF T$ 0    4 laB B C C C qdd $$x$Ifa$ p x$Ifkd+( $$IflF T$ 0    4 laC C C C C qdd $$x$Ifa$ p x$Ifkd( $$IflF T$ 0    4 la C !C 6C 7C 8C qdd $$x$Ifa$ p x$Ifkd{) $$IflF T$ 0    4 la8C 9C MC NC OC qdd $$x$Ifa$ p x$Ifkd#* $$IflF T$ 0    4 laOC PC QC RC nD oD pD qD rD D D sqoqhbq[Y''&`& !&&`(  xkd* $$IflF T$ 0    4 la rD sD yD zD ~D D D D D D D D D D D D D D D D D D D D D D D D D D D D D D D D D D D D D E E E E E #E $E *E eE ཱhfSOJQJhfS5OJQJhfSCJOJ QJ h7Gn#hfS0J%cHdhdhdhzRHhzRhfS0J% jHhzRhfS0J%U hYR+hYR+h7Gn0J%mHnHuhfShfS0J%mHnHu hfS0J%jhfS0J%U1D D D D D D D D D D D D D D D D & ]''&`#$'h]hK'&`#$C$EƀzR'$a$'&`D E E E E #E $E ,E fE gE oE E E E E E E =F >F FF {F |F F F F F F F #'h]h'&`#$eE fE gE mE E E E E E E E E F DF zF {F |F F F F F F F F G G G G G *G +G IG MG xG yG |G G G G H H ^I aI bI gI lI {hfSCJOJQJ#hfSCJOJQJfHq &hfS5CJOJQJfHq hfSCJ hfS>*CJOJ QJ 
hfSCJOJ QJ hfS5hfS5OJQJhfS5OJQJ hfSCJhfSOJQJhfS5OJQJhfShfSOJ QJ /F F G G G G G *G +G BG CG yG zG {G |G aI bI J J }K ~K K  & Fx,$d!%d$&d!'d$-D@M N!O$P!Q$$a$#lI uI I I I I K K K K K K K K L L M M jN yN P P Q Q NR }R ~R R R R R R R R 1S 2S S S &T 'T IT gT mT )U U U W W W W W W ˾˷js+ hfSOJ QJ UhfS5CJOJQJ hfS5CJ hfSCJ hfS5CJ$hfS56B*CJ$phhfS5B*CJ$phhfSB*CJphhfS56CJ hfS5CJ hfS5hfS hfS63K K K K K L M jN P Q MR NR }R ~R R R 1S 2S S S &T 'T sT T U  & Fx & Fx$a$ @ x^@ `$a$ -DM U 'U (U )U U U W W W W W W W W W W W W W W W W W W $a$$a$$xa$ $hx^ha$W W W W W W W W W W W hfSCJhfS)hfS5B*CJOJ QJ mHnHphu 1000&PP/R / =!"#$%' 0&P1/ =!"#$%$&P1/ =!"#$%' 0&P1/ =!"#$%' 0&P1/ =!"#$%' 0&P1/ =!"#$%$&P1/ =!"#$%' 0&P1/ =!"#$%' 0&P1/ =!"#$%$&P1/ =!"#$%$&P1/ =!"#$%' 0&P1/ =!"#$%$&P1/ =!"#$%}DyK _Toc510937329}DyK _Toc510937329}DyK _Toc510937330}DyK _Toc510937330}DyK _Toc510937331}DyK _Toc510937331}DyK _Toc510937332}DyK _Toc510937332}DyK _Toc510937333}DyK _Toc510937333}DyK _Toc510937334}DyK _Toc510937334}DyK _Toc510937335}DyK _Toc510937335}DyK _Toc510937336}DyK _Toc510937336}DyK _Toc510937337}DyK _Toc510937337}DyK _Toc510937338}DyK _Toc510937338}DyK _Toc510937339}DyK _Toc510937339}DyK _Toc510937340}DyK _Toc510937340}DyK _Toc510937341}DyK _Toc510937341}DyK _Toc510937342}DyK _Toc510937342}DyK _Toc510937343}DyK _Toc510937343}DyK _Toc510937344}DyK _Toc510937344}DyK _Toc510937345}DyK _Toc510937345}DyK _Toc510937346}DyK _Toc510937346}DyK _Toc510937347}DyK _Toc510937347}DyK _Toc510937348}DyK _Toc510937348}DyK _Toc510937349}DyK _Toc510937349}DyK _Toc510937350}DyK _Toc510937350}DyK _Toc510937351}DyK _Toc510937351}DyK _Toc510937352}DyK _Toc510937352}DyK _Toc510937353}DyK _Toc510937353}DyK _Toc510937354}DyK _Toc510937354}DyK _Toc510937355}DyK _Toc510937355}DyK _Toc510937356}DyK _Toc510937356}DyK _Toc510937357}DyK _Toc510937357}DyK _Toc510937358}DyK _Toc510937358}DyK _Toc510937359}DyK _Toc510937359}DyK _Toc510937360}DyK _Toc510937360}DyK _Toc510937361}DyK _Toc510937361}DyK _Toc510937362}DyK _Toc510937362}DyK 
_Toc510937363}DyK _Toc510937363}DyK _Toc510937364}DyK _Toc510937364}DyK _Toc510937365}DyK _Toc510937365}DyK _Toc510937366}DyK _Toc510937366}DyK _Toc510937367}DyK _Toc510937367}DyK _Toc510937368}DyK _Toc510937368}DyK _Toc510937369}DyK _Toc510937369}DyK _Toc510937370}DyK _Toc510937370}DyK _Toc510937371}DyK _Toc510937371}DyK _Toc510937372}DyK _Toc510937372}DyK _Toc510937373}DyK _Toc510937373}DyK _Toc510937374}DyK _Toc510937374}DyK _Toc510937375}DyK _Toc510937375}DyK _Toc510937376}DyK _Toc510937376}DyK _Toc510937377}DyK _Toc510937377}DyK _Toc510937378}DyK _Toc510937378}DyK _Toc510937379}DyK _Toc510937379}DyK _Toc510937380}DyK _Toc510937380}DyK _Toc510937381}DyK _Toc510937381}DyK _Toc510937382}DyK _Toc510937382}DyK _Toc510937383}DyK _Toc510937383}DyK _Toc510937384}DyK _Toc510937384}DyK _Toc510937385}DyK _Toc510937385}DyK _Toc510937386}DyK _Toc510937386}DyK _Toc510937387}DyK _Toc510937387}DyK _Toc510937388}DyK _Toc510937388}DyK _Toc510937389}DyK _Toc510937389}DyK _Toc510937390}DyK _Toc510937390}DyK _Toc510937391}DyK _Toc510937391}DyK _Toc510937392}DyK _Toc510937392}DyK _Toc510937393}DyK _Toc510937393}DyK _Toc510937394}DyK _Toc510937394}DyK _Toc510937395}DyK _Toc510937395}DyK _Toc510937396}DyK _Toc510937396}DyK _Toc510937397}DyK _Toc510937397}DyK _Toc510937398}DyK _Toc510937398}DyK _Toc510937399}DyK _Toc510937399}DyK _Toc510937400}DyK _Toc510937400}DyK _Toc510937401}DyK _Toc510937401}DyK _Toc510937402}DyK _Toc510937402}DyK _Toc510937403}DyK _Toc510937403}DyK _Toc510937404}DyK _Toc510937404}DyK _Toc510937405}DyK _Toc510937405}DyK _Toc510937406}DyK _Toc510937406}DyK _Toc510937407}DyK _Toc510937407}DyK _Toc510937408}DyK _Toc510937408}DyK _Toc510937409}DyK _Toc510937409}DyK _Toc510937410}DyK _Toc510937410}DyK _Toc510937411}DyK _Toc510937411}DyK _Toc510937412}DyK _Toc510937412}DyK _Toc510937413}DyK _Toc510937413}DyK _Toc510937414}DyK _Toc510937414}DyK _Toc510937415}DyK _Toc510937415}DyK _Toc510937416}DyK _Toc510937416}DyK _Toc510937417}DyK _Toc510937417}DyK _Toc510937418}DyK 
_Toc510937418}DyK _Toc510937419}DyK _Toc510937419}DyK _Toc510937420}DyK _Toc510937420}DyK _Toc510937421}DyK _Toc510937421}DyK _Toc510937422}DyK _Toc510937422}DyK _Toc510937423}DyK _Toc510937423}DyK _Toc510937424}DyK _Toc510937424}DyK _Toc510937425}DyK _Toc510937425}DyK _Toc510937426}DyK _Toc510937426}DyK _Toc510937427}DyK _Toc510937427}DyK _Toc510937428}DyK _Toc510937428}DyK _Toc510937429}DyK _Toc510937429}DyK _Toc510937430}DyK _Toc510937430}DyK _Toc510937431}DyK _Toc510937431}DyK _Toc510937432}DyK _Toc510937432}DyK _Toc510937433}DyK _Toc510937433}DyK _Toc510937434}DyK _Toc510937434}DyK _Toc510937435}DyK _Toc510937435}DyK _Toc510937436}DyK _Toc510937436}DyK _Toc510937437}DyK _Toc510937437}DyK _Toc510937438}DyK _Toc510937438}DyK _Toc510937439}DyK _Toc510937439}DyK _Toc510937440}DyK _Toc510937440}DyK _Toc510937441}DyK _Toc510937441}DyK _Toc510937442}DyK _Toc510937442}DyK _Toc510937443}DyK _Toc510937443}DyK _Toc510937444}DyK _Toc510937444}DyK _Toc510937445}DyK _Toc510937445}DyK _Toc510937446}DyK _Toc510937446}DyK _Toc510937447}DyK _Toc510937447}DyK _Toc510937448}DyK _Toc510937448}DyK _Toc510937449}DyK _Toc510937449}DyK _Toc510937450}DyK _Toc510937450}DyK _Toc510937451}DyK _Toc510937451}DyK _Toc510937452}DyK _Toc510937452}DyK _Toc510937453}DyK _Toc510937453}DyK _Toc510937454}DyK _Toc510937454}DyK _Toc510937455}DyK _Toc510937455}DyK _Toc510937456}DyK _Toc510937456}DyK _Toc510937457}DyK _Toc510937457}DyK _Toc510937458}DyK _Toc510937458}DyK _Toc510937459}DyK _Toc510937459}DyK _Toc510937460}DyK _Toc510937460}DyK _Toc510937461}DyK _Toc510937461}DyK _Toc510937462}DyK _Toc510937462}DyK _Toc510937463}DyK _Toc510937463}DyK _Toc510937464}DyK _Toc510937464}DyK _Toc510937465}DyK _Toc510937465}DyK _Toc510937466}DyK _Toc510937466}DyK _Toc510937467}DyK _Toc510937467}DyK _Toc510937468}DyK _Toc510937468}DyK _Toc510937469}DyK _Toc510937469}DyK _Toc510937470}DyK _Toc510937470}DyK _Toc510937471}DyK _Toc510937471}DyK _Toc510937472}DyK _Toc510937472}DyK _Toc510937473}DyK _Toc510937473}DyK 
_Toc510937474}DyK _Toc510937474}DyK _Toc510937475}DyK _Toc510937475}DyK _Toc510937476}DyK _Toc510937476}DyK _Toc510937477}DyK _Toc510937477}DyK _Toc510937478}DyK _Toc510937478}DyK _Toc510937479}DyK _Toc510937479}DyK _Toc510937480}DyK _Toc510937480}DyK _Toc510937481}DyK _Toc510937481}DyK _Toc510937482}DyK _Toc510937482}DyK _Toc510937483}DyK _Toc510937483}DyK _Toc510937484}DyK _Toc510937484}DyK _Toc510937485}DyK _Toc510937485}DyK _Toc510937486}DyK _Toc510937486}DyK _Toc510937487}DyK _Toc510937487}DyK _Toc510937488}DyK _Toc510937488}DyK _Toc510937489}DyK _Toc510937489}DyK _Toc510937490}DyK _Toc510937490}DyK _Toc510937491}DyK _Toc510937491}DyK _Toc510937492}DyK _Toc510937492}DyK _Toc510937493}DyK _Toc510937493}DyK _Toc510937494}DyK _Toc510937494}DyK _Toc510937495}DyK _Toc510937495}DyK _Toc510937496}DyK _Toc510937496}DyK _Toc510937497}DyK _Toc510937497}DyK _Toc510937498}DyK _Toc510937498}DyK _Toc510937499}DyK _Toc510937499}DyK _Toc510937500}DyK _Toc510937500}DyK _Toc510937501}DyK _Toc510937501}DyK _Toc510937502}DyK _Toc510937502}DyK _Toc510937503}DyK _Toc510937503}DyK _Toc510937504}DyK _Toc510937504}DyK _Toc510937505}DyK _Toc510937505}DyK _Toc510937506}DyK _Toc510937506}DyK _Toc510937507}DyK _Toc510937507}DyK _Toc510937508}DyK _Toc510937508}DyK _Toc510937509}DyK _Toc510937509}DyK _Toc510937510}DyK _Toc510937510}DyK _Toc510937511}DyK _Toc510937511}DyK _Toc510937512}DyK _Toc510937512}DyK _Toc510937513}DyK _Toc510937513}DyK _Toc510937514}DyK _Toc510937514}DyK _Toc510937515}DyK _Toc510937515}DyK _Toc510937516}DyK _Toc510937516}DyK _Toc510937517}DyK _Toc510937517}DyK _Toc510937518}DyK _Toc510937518}DyK _Toc510937519}DyK _Toc510937519}DyK _Toc510937520}DyK _Toc510937520}DyK _Toc510937521}DyK _Toc510937521}DyK _Toc510937522}DyK _Toc510937522}DyK _Toc510937523}DyK _Toc510937523}DyK _Toc510937524}DyK _Toc510937524}DyK _Toc510937525}DyK _Toc510937525}DyK _Toc510937526}DyK _Toc510937526}DyK _Toc510937527}DyK _Toc510937527}DyK _Toc510937528}DyK _Toc510937528}DyK _Toc510937529}DyK 
_Toc510937529}DyK _Toc510937530}DyK _Toc510937530}DyK _Toc510937531}DyK _Toc510937531}DyK _Toc510937532}DyK _Toc510937532}DyK _Toc510937533}DyK _Toc510937533}DyK _Toc510937534}DyK _Toc510937534}DyK _Toc510937535}DyK _Toc510937535}DyK _Toc510937536}DyK _Toc510937536}DyK _Toc510937537}DyK _Toc510937537}DyK _Toc510937538}DyK _Toc510937538}DyK _Toc510937539}DyK _Toc510937539}DyK _Toc510937540}DyK _Toc510937540}DyK _Toc510937541}DyK _Toc510937541}DyK _Toc510937542}DyK _Toc510937542}DyK _Toc510937543}DyK _Toc510937543}DyK _Toc510937544}DyK _Toc510937544}DyK _Toc510937545}DyK _Toc510937545}DyK _Toc510937546}DyK _Toc510937546}DyK _Toc510937547}DyK _Toc510937547}DyK _Toc510937548}DyK _Toc510937548}DyK _Toc510937549}DyK _Toc510937549}DyK _Toc510937550}DyK _Toc510937550}DyK _Toc510937551}DyK _Toc510937551}DyK _Toc510937552}DyK _Toc510937552}DyK _Toc510937553}DyK _Toc510937553}DyK _Toc510937554}DyK _Toc510937554}DyK _Toc510937555}DyK _Toc510937555}DyK _Toc510937556}DyK _Toc510937556}DyK _Toc510937557}DyK _Toc510937557}DyK _Toc510937558}DyK _Toc510937558}DyK _Toc510937559}DyK _Toc510937559}DyK _Toc510937560}DyK _Toc510937560}DyK _Toc510937561}DyK _Toc510937561}DyK _Toc510937562}DyK _Toc510937562}DyK _Toc510937563}DyK _Toc510937563}DyK _Toc510937564}DyK _Toc510937564}DyK _Toc510937565}DyK _Toc510937565}DyK _Toc510937566}DyK _Toc510937566}DyK _Toc510937567}DyK _Toc510937567}DyK _Toc510937568}DyK _Toc510937568}DyK _Toc510937569}DyK _Toc510937569}DyK _Toc510937570}DyK _Toc510937570}DyK _Toc510937571}DyK _Toc510937571}DyK _Toc510937572}DyK _Toc510937572}DyK _Toc510937573}DyK _Toc510937573}DyK _Toc510937574}DyK _Toc510937574}DyK _Toc510937575}DyK _Toc510937575}DyK _Toc510937576}DyK _Toc510937576}DyK _Toc510937577}DyK _Toc510937577}DyK _Toc510937578}DyK _Toc510937578}DyK _Toc510937579}DyK _Toc510937579}DyK _Toc510937580}DyK _Toc510937580}DyK _Toc510937581}DyK _Toc510937581}DyK _Toc510937582}DyK _Toc510937582}DyK _Toc510937583}DyK _Toc510937583}DyK _Toc510937584}DyK _Toc510937584}DyK 
_Toc510937585}DyK _Toc510937585}DyK _Toc510937586}DyK _Toc510937586}DyK _Toc510937587}DyK _Toc510937587}DyK _Toc510937588}DyK _Toc510937588}DyK _Toc510937589}DyK _Toc510937589}DyK _Toc510937590}DyK _Toc510937590}DyK _Toc510937591}DyK _Toc510937591}DyK _Toc510937592}DyK _Toc510937592}DyK _Toc510937593}DyK _Toc510937593}DyK _Toc510937594}DyK _Toc510937594}DyK _Toc510937595}DyK _Toc510937595}DyK _Toc510937596}DyK _Toc510937596}DyK _Toc510937597}DyK _Toc510937597}DyK _Toc510937598}DyK _Toc510937598}DyK _Toc510937599}DyK _Toc510937599}DyK _Toc510937600}DyK _Toc510937600}DyK _Toc510937601}DyK _Toc510937601}DyK _Toc510937602}DyK _Toc510937602}DyK _Toc510937603}DyK _Toc510937603}DyK _Toc510937604}DyK _Toc510937604}DyK _Toc510937605}DyK _Toc510937605}DyK _Toc510937606}DyK _Toc510937606}DyK _Toc510937607}DyK _Toc510937607}DyK _Toc510937608}DyK _Toc510937608}DyK _Toc510937609}DyK _Toc510937609}DyK _Toc510937610}DyK _Toc510937610}DyK _Toc510937611}DyK _Toc510937611}DyK _Toc510937612}DyK _Toc510937612}DyK _Toc510937613}DyK _Toc510937613}DyK _Toc510937614}DyK _Toc510937614}DyK _Toc510937615}DyK _Toc510937615}DyK _Toc510937616}DyK _Toc510937616}DyK _Toc510937617}DyK _Toc510937617}DyK _Toc510937618}DyK _Toc510937618}DyK _Toc510937619}DyK _Toc510937619}DyK _Toc510937620}DyK _Toc510937620}DyK _Toc510937621}DyK _Toc510937621}DyK _Toc510937622}DyK _Toc510937622}DyK _Toc510937623}DyK _Toc510937623}DyK _Toc509733908}DyK _Toc509733908}DyK _Toc509733909}DyK _Toc509733909}DyK _Toc509733910}DyK _Toc509733910}DyK _Toc509733911}DyK _Toc509733911}DyK _Toc509733912}DyK _Toc509733912}DyK _Toc509733913}DyK _Toc509733913}DyK _Toc509733914}DyK _Toc509733914}DyK _Toc509733915}DyK _Toc509733915}DyK _Toc509733916}DyK _Toc509733916}DyK _Toc509733917}DyK _Toc509733917}DyK _Toc509733956}DyK _Toc509733956}DyK _Toc509733957}DyK _Toc509733957}DyK _Toc509733958}DyK _Toc509733958}DyK _Toc509733959}DyK _Toc509733959}DyK _Toc509733960}DyK _Toc509733960}DyK _Toc509733961}DyK _Toc509733961}DyK _Toc509733962}DyK 
_Toc509733962}DyK _Toc509733963}DyK _Toc509733963}DyK _Toc509733964}DyK _Toc509733964}DyK _Toc509733965}DyK _Toc509733965}DyK _Toc509733966}DyK _Toc509733966}DyK _Toc509733967}DyK _Toc509733967}DyK _Toc509733968}DyK _Toc509733968}DyK _Toc509733969}DyK _Toc509733969}DyK _Toc509733970}DyK _Toc509733970}DyK _Toc509733971}DyK _Toc509733971}DyK _Toc509733972}DyK _Toc509733972}DyK _Toc509733973}DyK _Toc509733973}DyK _Toc509733974}DyK _Toc509733974}DyK _Toc509733975}DyK _Toc509733975yDyK  _APPENDIX_ADyK _7._SOFTWARE_ACQUISITION_3DyK _7._REFERENCESDyK _2._SOFTWARE_SAFETYDyK _3._SOFTWARE_SAFETY_2DyK _4._SAFETY_CRITICAL_1DyK _5._SOFTWARE_SAFETY_1DyK _6._Programming_Languages_1DyK _7._SOFTWARE_ACQUISITION_3DyK _8._REFERENCESyDyK  _APPENDIX_ADyK _APPENDIX_B_Software}DyK _APPENDIX_C_1yDyK  _APPENDIX_DyDyK  _APPENDIX_EqDyK table31$$If!vh5555#v:V l4054T$$If!vh5555#v:V l05/ 4T$$If!vh5555#v:V l05/ 4T$$If!vh5555#v:V l054T$$If!vh5555#v:V l054T$$If!vh5555#v:V l054T$$If!vh5555#v:V l054T$$If!vh5555#v:V l054TqDyK table21DyK _4.2.6_Formal_Inspections$$If!vh555N#v#vN:V l40      55N/ / 4f4T$$If!vh555N#v#vN:V l40      55N/ 4f4T$$If!vh555N#v#vN:V l40      55N4f4T$$If!vh555N#v#vN:V l40      55N4f4T$$If!vh555N#v#vN:V l40      55N4f4T$$If!vh555N#v#vN:V l40      55N4f4T$$If!vh555N#v#vN:V l40      55N4f4T$$If!vh555N#v#vN:V l40      55N4f4T$$If!vh555N#v#vN:V l40      55N4f4T$$If!vh555N#v#vN:V l40      55N4f4TDyK _2.3_Preliminary_HazardDyK _2.4.3_Tools_and_1qDyK table22K$$If!v h5!55555555 5 #v!#v#v#v#v#v#v#v#v #v :V l t05!55555555 5 4a kdS$$Ifl Tu8 3MK| K$e(! t0((((4 laK$$If!v h5!55555555 5 #v!#v#v#v#v#v#v#v#v #v :V l t05!55555555 5 4a kdW$$Ifl Tu8 3MK| K$e(! t0((((4 laK$$If!v h5!55555555 5 #v!#v#v#v#v#v#v#v#v #v :V l t05!55555555 5 4a kdjZ$$Ifl Tu8 3MK| K$e(! 
t0((((4 la$$If!vh5x 5x 5x #vx :V s4(0+,,5x / / 44 sf4$$If!vh5x 5x 5x #vx :V s4;0+,5x / / 44 sf4$$If!vh5x 5x 5x #vx :V s4X0+5x / / 44 sf4$$If!vh5x 5x 5x #vx :V s40+5x / / 44 sf4$$If!vh5x 5x 5x #vx :V s40+5x / 44 sf4$$If!vh5x 5x 5x #vx :V s40+5x / / 44 sf4$$If!vh5x 5x 5x #vx :V s405x / 44 sf4$$If!vh5x 5x 5x #vx :V s405x / 44 sf4qDyK table23$$If!vh55#v#v:V l455/  / / /  / 4f4Tc$$If!vh55)55'5F5#v#v)#v#v'#vF#v:V l4,55)55'5F5/ / / /  /  / / / / / / 4f4T$$If!vh55)55'5F5#v#v)#v#v'#vF#v:V l4 <55)55'5F5/  / / / /  /  / / / / / / 4f4p<T$$If!vh55)55'5F5#v#v)#v#v'#vF#v:V l4 255)55'5F5/ /  / / / / / / 4f4p2T$$If!vh55)55'5F5#v#v)#v#v'#vF#v:V l4 (55)55'5F5/ /  / / / / / / 4f4p(Tz$$If!vh55)55'5F5#v#v)#v#v'#vF#v:V l4 55)55'5F5/ /  /  / / / / / 4f4pTDyK _3._SOFTWARE_SAFETY_2$$If!vh55#v#v:V l0,554T$$If!vh55#v#v:V l0,55/ 4T$$If!vh55#v#v:V l0,554T$$If!vh55#v#v:V l0,554T$$If!vh55#v#v:V l0,554T$$If!vh55#v#v:V l0,554TDyK _2.3.1.1_Identifying_HazardssDyK  figure22DyK yK @http://wwwsrqa.jsc.nasa.gov/pcesDyK  figure22DyK Ohttp://books.usapa.belvoir.army.mil/cgi-bin/bookmgr/BOOKS/P385_16/FIGFIGUNIQ10yK http://books.usapa.belvoir.army.mil/cgi-bin/bookmgr/BOOKS/P385_16/FIGFIGUNIQ10$$If!vh5 585#v #v8#v:V l0i5 585/  /  / / /  4a#$$If!vh5s5585#vs#v#v8#v:V li5s5585/  / / /  / / /  / / /  4a$$If!vh5 5#v #v:V li5 5/  / 4a$$If!vh555#v#v#v:V li555/  4a$$If!vh555#v#v#v:V lpi555/ 4ay$$If!vh5 5#v #v:V li5 5/  4adDeCheck1dDeCheck1$$If!vh5 5h5b#v #vh#vb:V l|i5 5h5b/ / / / /  4ac$$If!vh5O)#vO):V li5O)/  4ac$$If!vh5O)#vO):V l0i5O)/ 4ac$$If!vh5O)#vO):V li5O)/  4ac$$If!vh5O)#vO):V li5O)/ 4ac$$If!vh5O)#vO):V li5O)/  4ac$$If!vh5O)#vO):V li5O)/ 4ac$$If!vh5O)#vO):V li5O)/  4ac$$If!vh5O)#vO):V li5O)/ 4ac$$If!vh5O)#vO):V li5O)/  4ac$$If!vh5O)#vO):V li5O)/ 4ac$$If!vh5O)#vO):V li5O)/  4a$$If!vh55t"#v#vt":V l8i55t"/  / / 4a$$If!vh55t"#v#vt":V l8i55t"/  / / 4a$$If!vh55t"#v#vt":V li55t"/ / / /  4a$$If!vh55(5 #v#v(#v :V l i55(5 / 4a$$If!vh55(5 #v#v(#v :V l i55(5 / 4a$$If!vh55(5 #v#v(#v :V l i55(5 / 4a$$If!vh55(5 #v#v(#v :V 
l i55(5 / 4aP$$If!vh5*#v*:V l5*4aDyK  _5.1.2_Requirements_CriticalityqDyK table23DyK _3._SOFTWARE_SAFETY_2DyK _3._SOFTWARE_SAFETY_2DyK _4._SAFETY_CRITICAL_1DyK _5._SOFTWARE_SAFETY_1DyK _6._Programming_Languages_1DyK _7._SOFTWARE_ACQUISITION_3DyK _2.3.1.2_Risk_Levels}DyK _3.2_Scope_ofqDyK table31$$If!vh5b55 5P #vb#v#v #vP :V x440      5b55 5P / / 44 xf4TDyK _2.3_Preliminary_HazardDyK _2.3.1_PHA_Approach$$If!vh5b55 5P #vb#v#v #vP :V x40      5b55 5P / 44 xf4TDyK _4.2.1.1_Safety_RequirementsDyK _4.2.1_Development_ofDyK _5.1_Software_SafetyDyK  _5.1.2_Requirements_CriticalityDyK _5.1.3_Specification_AnalysisDyK _5.1.6_Software_FaultDyK _5.1.5_Timing,_ThroughputDyK _5.1.4_Formal_Inspections$$If!vh5b55 5P #vb#v#v #vP :V x40      5b55 5P 44 xf4TDyK _5.2.1_Update_CriticalityDyK _5.2.2_Conduct_HazardDyK _5.2.3_Analyze_ArchitecturalDyK "_5.2.4.1_Interdependence_AnalysisDyK _5.2.4.2__IndependenceDyK _5.2.8__Formal$$If!vh5b55 5P #vb#v#v #vP :V x40      5b55 5P 44 xf4TDyK _5.3.1_Design_LogicDyK _5.3.2_Design_DataDyK _5.3.3_Design_InterfaceDyK _5.3.4_Design_ConstraintDyK _5.3.6_Dynamic_FlowgraphDyK _5.3.11_Requirements_StateDyK _5.3.13_Software_Failure$$If!vh5b55 5P #vb#v#v #vP :V x40      5b55 5P 44 xf4TDyK _5.4.1_Code_LogicDyK _5.4.2_Code_DataDyK _5.4.3_Code_InterfaceDyK _5.4.6_Formal_CodeDyK _5.4.8_Unused_CodeDyK _5.4.9_Interrupt_Analysis$$If!vh5b55 5P #vb#v#v #vP :V x40      5b55 5P 44 xf4TDyK _4.6.1_Testing_TechniquesDyK _4.6.6_Regression_TestingDyK _4.6.7_Software_SafetyDyK _4.6.6_Test_WitnessingDyK _5.5.1_Test_CoverageDyK _5.5.5_Test_ResultsDyK _5.5.3_Reliability_Modeling$$If!vh5b55 5P #vb#v#v #vP :V x40      5b55 5P 44 xf4T$$If!vh5b55 5P #vb#v#v #vP :V x40      5b55 5P 44 xf4T$$If!vh5b55 5P #vb#v#v #vP :V x40      5b55 5P 44 xf4TDyK _2.4.1.2_Risk_Levels$$If!vh5N#vN:V l ` 05N4p T$$If!vh5N#vN:V l ` 05N4p T$$If!vh5N#vN:V l ` 05N4p TDyK _2.3.1.2_Risk_Levels$$If!vh55#v#v:V l4 ``55/ / / / / 4f4pT$$If!vh55#v#v:V l455/ /  / / / 4f4T$$If!vh55#v#v:V l455/  / / / / 
4f4T$$If!vh55#v#v:V l455/  / / / /  4f4T$$If!vh55#v#v:V l455/ /  / /  / 4f4T$$If!vh55#v#v:V l455/  / / / / 4f4T$$If!vh55#v#v:V l455/  / / / /  4f4T$$If!vh55#v#v:V l455/ /  / /  / 4f4T$$If!vh55#v#v:V l455/  / / / / 4f4T$$If!vh55#v#v:V l455/  / / / / / 4f4TqDyK table23$$If!vh55L#v#vL:V l4 ``0+55L/ / / / / 4f4pTl$$If!vh55555#v#v#v#v#v:V l40+55555/ / / / /  / / / /  4f4p T$$If!vh55555#v#v#v#v#v:V l4 (055555/ /  /  /  / / / /  / 4f4p(T$$If!vh55555#v#v#v#v#v:V l4 `(055555/  /  /  / / / / / 4f4p(T$$If!vh55555#v#v#v#v#v:V l4 055555/  /  /  / / / / / 4f4pT$$If!vh55555#v#v#v#v#v:V l4 `055555/  /  /  /  / / / / 4f4pTqDyK table34$$If!vh5 5#v #v:V l ``05 5/ / 4pT$$If!vh5 5#v #v:V l0,5 5/ 4T$$If!vh5 5#v #v:V l0,5 54T$$If!vh5 5#v #v:V l0,5 54T$$If!vh5 5#v #v:V l0,5 54TsDyK  figure323Dd0* bbX  C 4Arisk indexes2b3[Oƭ 2BGn2[Oƭ PNG  IHDRPLTE=y؜bKGDH cmPPJCmp0712Hs/\IDATx^흍* KtC(Jޙ T*xP{ZF<*Qb4xTh2G!dQ ЏJ'C"@N.@GE@?*\ &~T:Mtr1 <*Qb4xT4]2JIޣ|>G]!hi*|_e4ړ€n&ЯŀUO62;#RElI'm?MƚbCZ#1vN ~ ty}XJnM]2b6_nt}}VxcCy,=Cb|YZ[}G[ҙ,l2">*Gہ~K>B&FDoQdO7a#nlZa)'!i##3l96݀"_.mU jw݇v@۷ⴐ(Ɩ@Ɠ hJdUP~oX.rEtDg= |" rKe_SSQ=a9>!K#սe>|%W"`z+oF }r̯JFݖ0t> @x_E7<@41vл~\@Zf _Mc87[(h:%Zw 8k;=qk&{oB53$N$d詶xGg[@2y|G]&2lֳ%Nz'x[h!<:h5yG`EdBpv|쾨B+"S u?G7èOS-A Iɻ#:G\%r$$N~5̴q?MK#Gd ]P[1|۔LkKqd46.ytJ@U~Xn;f߯u5o-AǑӄ|dr$LSQdS/7&/]~'+"3 tYBo8?YrI\C'+QH> Т lj~H=tZn ߀pƶ"W+|92/ɷs-xoҖimXnj.|Դh㪛B\%|m'45ڸ$ ZN}ݠO܊}Tm8&k%? 
F.f8&[%{[NC޴eji?M͗JvCӖ㖅 = w~d?2_T{;xGG)8nH;hAGGzCo%&p/:r?Rp:8n 4ZfO%_z}6 t(p%pgs 9T I{gG| vKPV+t:~&Y>vo9l^ȍ霨:vE_NÄ8E/a|L0^U3oCV}:tIV/_r$@J).n JNtĊՐjj5Ѕ\aqtSx=XSm \aSۑc; mpԲKIjaW[2mwaV?.O#Yo]\VD.Oq.u6i熰Q]#5cQUumuhHpX Xm|BeA/dW9&Ez 1@VMȗ,GW;,?XCJZ8Vh#jRӇ~8-=_#Wiz|zrE&K@^l4ڸ/Щ /[r5&`6WeJ[Mt#D\W9Ҏ##b#r|[2Gʫ*0 VxR.1TMgxhD&>h(LF-"@BLK""2aMqZf%!V!!2&0,j@hY9TP N 4ʬ **'#eZ  #eV < eVD| dVf Q I{ϟȻĤJoB팮Y8f4G4ڸfilKf<xT v/і "Oĺɣ7zBt>3?B2h5`T)T*-;ǓH>>i-/).nA^t-БIAU`hĜqo > R VQE<^-&:6F+>UӺBg݀dY~|HɓE΁)圦#ܧG2`I2ܵΜ3-&uZZ&uqz!-q l@w#TĖđ퓾[dqzjYlUN &@uV1p~'?uY9OgΥx!4ؖur Ƨd.NƮ|cW@>iq?r&vI.%I]rTT@ZI Ό4¤|$6бUc^eM.[mX֟t)֫y}<:B2q2|*Fe/p{?'8n|NS!qV^>,h慾]/B!r,m&NHX蝀bq_Z\yyac65@1 ku /|"@6EUnr TG4Vf.vkS.i9K_,Z|sqi=h4Neutm^:B,HL4^ϒ.sTjw?}@+C&nۈ 08o`Hh4Rm9,=g:[P.!"~hO+#oZ)|qX[VhtH,MV|:ӐDX6c* ֠dBB 47s!aBh :ޖ=P.!|@_ SJ!-?8:MK-G_!].dK?WWgK?S߬?A$: `t|BGJx%v$t{אf(Z]yZ"`,r|刧N-1]t-|@ ]PpqS -9Bo :LCJ}0}/Gޅk*xV@-11=Q *DAx ~B,LdZ=th СY8v+1o] ,llN[hڹBץsޭQbe-z.=.R*tZ*0rZcQhDV{T&:OstilrDexF1vQ<[;~h׮ZZbSbm@PP }I9@+ b:ol-u]2#(g/q)LZ @/4@ 'r`D ej!%z+ar…[hB[S'e=/7Fl1\^sl9I@K'+4^jj<vA6?\4a-ذ+J e,fl޸̖-:( C 4\YkN]*?7o | E~ Ѳq:R`J@5I-+M![EG^E+t{є@p6z˺ \ x3j1\4Mc:גA1 bF@=cz:rMax MP~SHbf]NZLhNsS"Wk-ǟ7c}PP" Eڪ,.\jpz<58&lS;5e_c EvP W0D4vB Eh8v\#|B!D %@D "Z {F/'Wjʵ:ޭU{ֱB[ mEK+rZ ͏CS*Q{G-cטm-G0V"Q$4's0k'_a9GDcr`Od>av@W CmghIjf hiG/+Ḏ.Qh}" bS 4hW=a|_kK.ro$T jyXR^MrtvP Z02&(EiŚ8^0\4B@O_f8!.uY2Zgp$ 2Z~qܤ&%ɛ<"#fޘ1Lp}.\ΎEnP X2=~NOkǙMq^EIhrQ =|LtnL5OqM nhA آi@- b3[3N`GZ-?M=;s=…2ڍ:_-WB[6{5٠sw|IڍLL?92qi@sw|IڍQ-m8iT;q`7+S.l\q:xkNhّ]$k7N'H2hᛕ]P08]6U5!PJp~NҸfv9Iwsʇ6iCCJp~JҸ ϡ6 moMX HNK\>N:i;gX5}ZyL/GK/ -]H6;4:2wpwI@@ǑS " 7}0?Ln b'Ћ@'߮gLE?گ*ttZs~qO}_5GoU:mr6{bD:wщC})R%5Oq@0j>ҋǺ'ٶqw-@-Zԅ$l 3mw¨)TpOjlZnS4WqܸQ=LW%iAݢb (roѹ:_z).pgQ‘mݿ;{ic {]=_C@i Qq3S_Vo6_jhy?MJ#X럋f h_3!j & Y}ؚF͛՘ښMWr³BK0 H.] $e h\Q9,YZϸOw U?Jwo@gxYi'mZ29n 55sR4҇ (EHƟ@݇-@*pzNSDcE/qo6_<fK;[ޛ2O)6tnL@z&W/AN* &zx|T:79j7&x!!ҾN" )c@څLPr @i>iU!rz?iK}4=[~,߯뇪 & W@xs(ݤe2}UFIǽ4"ۅZHJ1e?-4Z=Q Q. TU&8|BW2)SLЖV#75ll dsrQI(GTWqV#Bx>#yF/ǐMQn[ssq2ZCZB#J.Cq:gt_: @KKScOSzϩ'YVY^o*~xHUɈ2. 
hq.uOb"_#vw~W`(Nf}@HV]IENDB`qDyK table35qDyK table33DyK _3.2.3.3_Tailoring_the$$If!vh55L#v#vL:V l4 ``,55L/ / / / /  4f4pT$$If!vh55555#v#v:V l4,55/ / / / / / / / / / 4f4Tb$$If!vh55555#v#v:V l4 2,55/ / / / / /  / 4f4p2T$$If!vh55555#v#v:V l4 2,55/ / / / / / / /  / / 4f4p2T$$If!vh55555#v#v:V l4 2,55/ / / / / / / /  /  /  / 4f4p2Tn$$If!vh55555#v#v:V l4 ,55/ /  / / / / /  /  /  / / 4f4pT$$If!vh5e5f5l#ve#vf#vl:V l05e5f5l4T$$If!vh5e5f5l#ve#vf#vl:V l05e5f5l/ 4T$$If!vh5e5f5l#ve#vf#vl:V l05e5f5l4T$$If!vh5e5f5l#ve#vf#vl:V l05e5f5l4T$$If!vh5e5f5l#ve#vf#vl:V l05e5f5l4T$$If!vh5e5f5l#ve#vf#vl:V l05e5f5l4TqDyK table31DyK _7._SOFTWARE_ACQUISITION_3DyK _5.1.1_Software_SafetyDyK _4._SAFETY_CRITICAL_1DyK _5._SOFTWARE_SAFETY_1$$If!vh5n 5#vn #v:V l* 05n 5/ 4p$$If!vh5n 5#vn #v:V l ``05n 5/ 4p$$If!vh5n 5#vn #v:V l @@05n 5/ 4p$$If!vh5n 5#vn #v:V l4 05n 5/ / 4f4pDyK _4._SAFETY_CRITICAL_1DyK _5._SOFTWARE_SAFETY_2qDyK table37sDyK  table313qDyK table35$$IfD !vh5V#vV:V l4 t0  5V/ 4aD f4$$IfD !vh5b5 #vb#v :V l t0  5b5 / 4aD $$IfD !vh5b5 #vb#v :V l t0  5b5 4aD $$IfD !vh5b5 #vb#v :V l t0  5b5 4aD $$IfD !vh5b5 #vb#v :V l t0  5b5 4aD qDyK table37sDyK  table313yDyK  _APPENDIX_E$$If!vh55W#v#vW:V s40    55W/ / / / / 44 sf4T($$If!vh55T55U#v#vT#v#vU:V s40    55T55U/ / /  / / / 44 sf4TDyK _2.3_Preliminary_Hazard$$If!vh55T55U#v#vT#v#vU:V s40    55T55U/ / / 44 sf4TDyK _5.1.1_Software_Safety$$If!vh55T55U#v#vT#v#vU:V s40    55T55U/ / 44 sf4TDyK _5.1.1.1_Checklists_and$$If!vh55T55U#v#vT#v#vU:V s40    55T55U/ / 44 sf4TDyK  _5.1.2_Requirements_Criticality$$If!vh55T55U#v#vT#v#vU:V s40    55T55U/ / 44 sf4TDyK _4.2.2__Generic$$If!vh55T55U#v#vT#v#vU:V s40    55T55U/ / 44 sf4TDyK _5.1.3_Specification_Analysis$$If!vh55T55U#v#vT#v#vU:V s40    55T55U/ / 44 sf4TDyK _4.2.4_Formal_Methods$$If!vh55T55U#v#vT#v#vU:V s40    55T55U/ / 44 sf4TDyK _4.2.6_Formal_Inspections$$If!vh55T55U#v#vT#v#vU:V s40    55T55U/ / 44 sf4TDyK _5.1.5_Timing,_Throughput$$If!vh55T55U#v#vT#v#vU:V s40    55T55U/ / 44 sf4TDyK 
_5.1.6_Software_Fault$$If!vh55T55U#v#vT#v#vU:V s40    55T55U/ / / 44 sf4T$$If!vh55W#v#vW:V l40      55W/ / / / / 4f4TI$$If!vh55T55U#v#vT#v#vU:V l40      55T55U/ / /  /  / / / / / 4f4TDyK _4.3.2_Selection_ofDyK _7.1_Off-the-Shelf_SoftwareI$$If!vh55T55U#v#vT#v#vU:V l40      55T55U/ / /  /  / / / / / 4f4TDyK _4.3.3_Selection_ofI$$If!vh55T55U#v#vT#v#vU:V l40      55T55U/ / /  /  / / / / / 4f4TDyK _4.3.5_Coding_StandardsI$$If!vh55T55U#v#vT#v#vU:V l40      55T55U/ / /  /  / / / / / 4f4TDyK _5.2.1_Update_Criticality$$If!vh55T55U#v#vT#v#vU:V l40      55T55U/ / / 4f4TDyK _5.2.2_Conduct_Hazard$$If!vh55T55U#v#vT#v#vU:V l40      55T55U/ / 4f4TDyK _5.2.3_Analyze_Architectural$$If!vh55T55U#v#vT#v#vU:V l40      55T55U/ / 4f4TDyK "_5.2.4.1_Interdependence_Analysis$$If!vh55T55U#v#vT#v#vU:V l40      55T55U/ / 4f4TDyK _5.2.4.2__Independence$$If!vh55T55U#v#vT#v#vU:V l40      55T55U/ / 4f4TDyK _5.2.5_Update_Timing,$$If!vh55T55U#v#vT#v#vU:V l40      55T55U/ / 4f4TDyK _5.2.6_Update_Software$$If!vh55T55U#v#vT#v#vU:V l40      55T55U/ / 4f4TDyK _5.2.7_Formal_Inspections$$If!vh55T55U#v#vT#v#vU:V l40      55T55U/ / 4f4TDyK _5.2.8__Formal$$If!vh55T55U#v#vT#v#vU:V l40      55T55U/ / / 4f4T$$If!vh5"54#v"#v4:V l40      5"54/ / / / / 4f4T$$If!vh5"555#v"#v#v:V l40      5"55/ / / /  / / / 4f4T}DyK _4.2.5__Model$$If!vh5"555#v"#v#v:V l40      5"55/ / / 4f4TDyK _5.3.1_Design_Logic$$If!vh5"555#v"#v#v:V l40      5"55/ / / 4f4TDyK _5.3.2_Design_Data$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4TDyK _5.3.3_Design_Interface$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4TDyK _5.3.4_Design_Constraint$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4TDyK _5.3.5_Design_Functional$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4TDyK _5.3.6_Software_Element$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4TDyK _5.3.7_Rate_Monotonic$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4TDyK _5.3.8_Dynamic_Flowgraph$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4TDyK _5.3.9_Markov_Modeling$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4TDyK 
_5.3.10_Measurement_of$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4TDyK _5.3.11_Selection_of$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4TDyK _5.3.12_Formal_Methods$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4TDyK _5.3.13_Requirements_State$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4TDyK _5.3.14_Formal_Inspections$$If!vh5"555#v"#v#v:V l40      5"55/ /  / 4f4TDyK _5.3.15_Software_Failure$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4TDyK _5.3.16_Updates_to$$If!vh5"555#v"#v#v:V l40      5"55/ /  / 4f4T$$If!vh5"54#v"#v4:V l40      5"54/ / / / / 4f4T$$If!vh5"555#v"#v#v:V l40      5"55/ / / /  / / / 4f4TDyK _4.5.1_Coding_Checklists$$If!vh5"555#v"#v#v:V l40      5"55/ / / /  / / / 4f4TDyK _4.5.2_Defensive_Programming$$If!vh5"555#v"#v#v:V l40      5"55/ / / /  / / / 4f4TDyK _4.5.3_Refactoring$$If!vh5"555#v"#v#v:V l40      5"55/ / / /  / / / 4f4TDyK _5.4.1_Code_Logic$$If!vh5"555#v"#v#v:V l40      5"55/ / / 4f4TDyK _5.4.2_Code_Data$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4TDyK _5.4.3_Code_Interface$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4TDyK _5.4.4_Update_Measurement$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4TDyK _5.4.5_Update_Design$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4TDyK _5.4.6_Formal_Code$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4TDyK _5.4.7_Applying_Formal$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4TDyK _5.4.8_Unused_Code$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4TDyK _5.4.9_Interrupt_Analysis$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4TDyK _5.4.10__Final$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4TDyK _5.4.11_Program_Slicing$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4TDyK _5.4.12_Update_Software$$If!vh5"555#v"#v#v:V l40      5"55/ / / 4f4T$$If!vh5"5F#v"#vF:V l40      5"5F/ / / / / 4f4T$$If!vh5"555#v"#v:V l40      5"5/ / 4f4TDyK _4.5.4_Unit_Level$$If!vh5"555#v"#v:V l40      5"5/ / 4f4TDyK _4.6.3_Integration_Testing$$If!vh5"555#v"#v:V l40      5"5/ / 4f4TDyK _4.6.4_System_Testing$$If!vh5"555#v"#v:V l40      5"5/ / 4f4TDyK _4.6.5__Software$$If!vh5"555#v"#v:V l40      5"5/ / 4f4TDyK 
_4.6.7_Software_Safety$$If!vh5"555#v"#v:V l40      5"5/ / 4f4TDyK _7.1.4_Who_Tests$$If!vh5"555#v"#v:V l40      5"5/ / 4f4TDyK _5.5.1_Test_Coverage$$If!vh5"555#v"#v:V l40      5"5/ / 4f4TDyK _5.5.2_Formal_Inspections$$If!vh5"555#v"#v:V l40      5"5/ / 4f4TDyK _5.5.3_Reliability_Modeling$$If!vh5"555#v"#v:V l40      5"5/ / 4f4TDyK _5.5.4_Checklists_of$$If!vh5"555#v"#v:V l40      5"5/ / 4f4TDyK _5.5.5_Test_Results$$If!vh5"555#v"#v:V l40      5"5/ / 4f4TDyK  _5.5.6_Independent_Verification$$If!vh5"555#v"#v:V l40      5"5/ / / 4f4T$$If!vh5"54#v"#v4:V l40      5"54/ / / / / 4f4T$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4T$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4T$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4T$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4T$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4T$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4T$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4T$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4T$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4T$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4T$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4T$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4T$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4T$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4T$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4T$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4T$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4T$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4T$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4T$$If!vh5"555#v"#v#v:V l40      5"55/ / 4f4T$$If!vh5"555#v"#v#v:V l40      5"55/ / / 4f4T$$If!vh5"5#v"#v:V l40      5"5/ / / / / 4f4T$$If!vh5"555 #v"#v#v :V l40      5"55 / / 4f4T$$If!vh5"555 #v"#v#v :V l40      5"55 / / 4f4T$$If!vh5"555 #v"#v#v :V l40      5"55 / / 4f4T$$If!vh5"555 #v"#v#v :V l40      5"55 / / 4f4T$$If!vh5"555 #v"#v#v :V l40      5"55 / / 4f4T$$If!vh5"555 #v"#v#v :V l40      5"55 / / 4f4T$$If!vh5"555 #v"#v#v :V l40      5"55 / / 4f4T$$If!vh5"555 #v"#v#v :V l40      5"55 / / 4f4T$$If!vh5"555 #v"#v#v :V l40      5"55 / / 4f4T$$If!vh5"555 #v"#v#v :V l40      5"55 / / 4f4T$$If!vh5"555 
#v"#v#v :V l40      5"55 / / 4f4T$$If!vh5"555 #v"#v#v :V l40      5"55 / / 4f4TDyK _5.5.3_Reliability_Modeling$$If!vh5"555 #v"#v#v :V l40      5"55 / / 4f4TDyK _5.5.4_Checklists_of$$If!vh5"555 #v"#v#v :V l40      5"55 / / / 4f4T$$If!vh55#v:V l ``05/ / / / /  4pT$$If!vh55#v:V l05/  / / / 4T$$If!vh55#v:V l05/  / / 4T$$If!vh55#v:V l05/  / / 4T$$If!vh55#v:V l05/  / / 4T$$If!vh55#v:V l05/ / / /  4TDyK _2.3_Preliminary_HazardDyK _4.2.2__Generic$$If!vh5X5 #vX#v :V l 05X5 / / 4p$$If!vh5X5 #vX#v :V l 05X5 / 4p$$If!vh5X5 #vX#v :V l05X5 / 4$$If!vh5X5 #vX#v :V l05X5 / 4$$If!vh5X5 #vX#v :V l 05X5 / 4p$$If!vh5X5 #vX#v :V l 05X5 / / 4pDyK _5.1.1_Software_SafetyDyK _E.2_Generic_SoftwareDyK _4.3_Architectural_DesignDyK _Glossary_of_TermsDyK _5.1.5_Timing,_Throughput5DyK 0http://www.abo.fi/~johan.lilius/mc/mclinks.htmlyK `http://www.abo.fi/~johan.lilius/mc/mclinks.htmlDyK yK `http://www.math.hmc.edu/~jpl/modelcheckers.htmlDyK yK nhttp://netlib.bell-labs.com/netlib/spin/whatispin.htmlDyK yK Vhttp://www.cs.cmu.edu/~modelcheck/smv.html-DyK .http://www-cad.EECS.Berkeley.EDU/~tah/HyTech/yK \http://www-cad.eecs.berkeley.edu/~tah/HyTech/uDyK @http://www-verimag.imag.fr//TEMPORISE/kronos/index-english.htmlyK http://www-verimag.imag.fr//TEMPORISE/kronos/index-english.htmlDyK http://www.brics.dk/mona/yK 4http://www.brics.dk/mona/%DyK ,http://sprout.stanford.edu/dill/murphi.htmlyK Xhttp://sprout.stanford.edu/dill/murphi.htmlDyK yK ^http://www.cis.upenn.edu/~lee/inhye/treat.htmlDyK http://tvs.twi.tudelft.nl/yK 6http://tvs.twi.tudelft.nl/DyK http://rodin.stanford.edu/yK 6http://rodin.stanford.edu/=DyK 2http://www.docs.uu.se/docs/rtmv/uppaal/index.htmlyK dhttp://www.docs.uu.se/docs/rtmv/uppaal/index.html)DyK -http://www.cs.cmu.edu/~modelcheck/verus.htmlyK Zhttp://www.cs.cmu.edu/~modelcheck/verus.htmlDyK 'http://www-cad.eecs.berkeley.edu/~vis/yK Nhttp://www-cad.eecs.berkeley.edu/~vis/mDyK >http://www.cadence.com/eda_solutions/flv_fveimc_l3_index.htmlyK 
|http://www.cadence.com/eda_solutions/flv_fveimc_l3_index.htmlDyK 'http://www.time-rover.com/TRindex.htmlyK Nhttp://www.time-rover.com/TRindex.htmlDyK *http://www.rationalrose.com/modelchecker/yK Thttp://www.rationalrose.com/modelchecker/qDyK table42m$$If!vh5`'#v`':V l45`'/  / 4f4T$$If!vh55555#v:V l45/  / / / / / /  4f4T$$If!vh55555#v:V l45/  /  / / / /  /  4f4TDyK _7.1_Off-the-Shelf_SoftwareDyK _6._Programming_Languages_1DyK _5.1.3_Specification_AnalysisDyK _4.2.1_Development_ofDyK _6.15_Good_ProgrammingDyK _4.6.7_Software_SafetyDyK yK >http://www.softwareqatest.com/DyK _4.6.4_System_TestinguDyK @http://www.cigital.com/presentations/testing_objects/sld001.htmyK http://www.cigital.com/presentations/testing_objects/sld001.htmDyK yK Lhttp://www.rbsc.com/pages/ootbib.html!DyK +http://www.cetus-links.org/oo_testing.htmlyK Vhttp://www.cetus-links.org/oo_testing.html DyK %http://www.rbsc.com/pages/myths.htmlyK Jhttp://www.rbsc.com/pages/myths.htmleDyK <http://www.stsc.hill.af.mil/crosstalk/1995/apr/testinoo.aspyK xhttp://www.stsc.hill.af.mil/crosstalk/1995/apr/testinoo.aspDyK _5.4.11_Program_SlicingDyK _3.3_Incorporating_SoftwareDyK _5.1_Software_SafetyDyK _5.2_Architectural_DesignDyK _5.3_Detailed_DesignDyK _5.4_Code_AnalysisDyK _5.5_Test_AnalysisDyK _5.6_Operations_&DyK _4.2.1.1_Safety_RequirementsDyK yK xhttp://tide.it.bond.edu.au/inft390/002/Resources/sysreq.htmDyK _4.2.4_Formal_MethodsDyK _4.2.1_Development_ofDyK _4.2.6_Formal_InspectionsDyK _5.1.2.1_Critical_SoftwareDdF  C "A doc2b4J*GPNs~Gn4J*GPNsPNG  IHDR@@PLTE)))999RRRsss{{{99RRss{{9j^YbKGDH cmPPJCmp0712HsOIDATXGVv -`icvf<z7oۧ\Ķ۞\9*q4H>ABH˂8`~Y `9?DJ0N\# bY/k3| 54d\{79FJWzkGw#* ؓpdnQRR!GfgGA]-f Lx襭2`ɼ{ID Qn( 3 +Y`#T'<4l5 `P5|T=iZC3ʙE;fڙKB]w㬃4Y1M㻳OvGʃ&c` v;z' (p֒IErAHǏiUGg- gI& `x4_S.LHe4Blp)&TUQB8pTUH*@blt7OB=\*gwآ`MkU!f>TW 8LeqzP J8Y2 W`ɩۛ"sZ悁s&Y50 2en@H$L'V|u;vh6!Օ&}I\Pa9v>V^(QE5el*m{>eD eAl u=wP$s/B`Wb&~Cx pMЭG?.{IENDB`DdF  C "A doc2b4J*GPNsGn4J*GPNsPNG  IHDR@@PLTE)))999RRRsss{{{99RRss{{9j^YbKGDH 
cmPPJCmp0712HsOIDATXGVv -`icvf<z7oۧ\Ķ۞\9*q4H>ABH˂8`~Y `9?DJ0N\# bY/k3| 54d\{79FJWzkGw#* ؓpdnQRR!GfgGA]-f Lx襭2`ɼ{ID Qn( 3 +Y`#T'<4l5 `P5|T=iZC3ʙE;fڙKB]w㬃4Y1M㻳OvGʃ&c` v;z' (p֒IErAHǏiUGg- gI& `x4_S.LHe4Blp)&TUQB8pTUH*@blt7OB=\*gwآ`MkU!f>TW 8LeqzP J8Y2 W`ɩۛ"sZ悁s&Y50 2en@H$L'V|u;vh6!Օ&}I\Pa9v>V^(QE5el*m{>eD eAl u=wP$s/B`Wb&~Cx pMЭG?.{IENDB`$$If!vh55#v:V l054DdF  C "A doc2b4J*GPNs"Gn4J*GPNsPNG  IHDR@@PLTE)))999RRRsss{{{99RRss{{9j^YbKGDH cmPPJCmp0712HsOIDATXGVv -`icvf<z7oۧ\Ķ۞\9*q4H>ABH˂8`~Y `9?DJ0N\# bY/k3| 54d\{79FJWzkGw#* ؓpdnQRR!GfgGA]-f Lx襭2`ɼ{ID Qn( 3 +Y`#T'<4l5 `P5|T=iZC3ʙE;fڙKB]w㬃4Y1M㻳OvGʃ&c` v;z' (p֒IErAHǏiUGg- gI& `x4_S.LHe4Blp)&TUQB8pTUH*@blt7OB=\*gwآ`MkU!f>TW 8LeqzP J8Y2 W`ɩۛ"sZ悁s&Y50 2en@H$L'V|u;vh6!Օ&}I\Pa9v>V^(QE5el*m{>eD eAl u=wP$s/B`Wb&~Cx pMЭG?.{IENDB`DdF  C "A doc2b4J*GPNsGn4J*GPNsPNG  IHDR@@PLTE)))999RRRsss{{{99RRss{{9j^YbKGDH cmPPJCmp0712HsOIDATXGVv -`icvf<z7oۧ\Ķ۞\9*q4H>ABH˂8`~Y `9?DJ0N\# bY/k3| 54d\{79FJWzkGw#* ؓpdnQRR!GfgGA]-f Lx襭2`ɼ{ID Qn( 3 +Y`#T'<4l5 `P5|T=iZC3ʙE;fڙKB]w㬃4Y1M㻳OvGʃ&c` v;z' (p֒IErAHǏiUGg- gI& `x4_S.LHe4Blp)&TUQB8pTUH*@blt7OB=\*gwآ`MkU!f>TW 8LeqzP J8Y2 W`ɩۛ"sZ悁s&Y50 2en@H$L'V|u;vh6!Օ&}I\Pa9v>V^(QE5el*m{>eD eAl u=wP$s/B`Wb&~Cx pMЭG?.{IENDB`$$If!vh55#v:V l054DdF  C "A doc2b4J*GPNsGn4J*GPNsPNG  IHDR@@PLTE)))999RRRsss{{{99RRss{{9j^YbKGDH cmPPJCmp0712HsOIDATXGVv -`icvf<z7oۧ\Ķ۞\9*q4H>ABH˂8`~Y `9?DJ0N\# bY/k3| 54d\{79FJWzkGw#* ؓpdnQRR!GfgGA]-f Lx襭2`ɼ{ID Qn( 3 +Y`#T'<4l5 `P5|T=iZC3ʙE;fڙKB]w㬃4Y1M㻳OvGʃ&c` v;z' (p֒IErAHǏiUGg- gI& `x4_S.LHe4Blp)&TUQB8pTUH*@blt7OB=\*gwآ`MkU!f>TW 8LeqzP J8Y2 W`ɩۛ"sZ悁s&Y50 2en@H$L'V|u;vh6!Օ&}I\Pa9v>V^(QE5el*m{>eD eAl u=wP$s/B`Wb&~Cx pMЭG?.{IENDB`DdF  C "A doc2b4J*GPNsVGn4J*GPNsPNG  IHDR@@PLTE)))999RRRsss{{{99RRss{{9j^YbKGDH cmPPJCmp0712HsOIDATXGVv -`icvf<z7oۧ\Ķ۞\9*q4H>ABH˂8`~Y `9?DJ0N\# bY/k3| 54d\{79FJWzkGw#* ؓpdnQRR!GfgGA]-f Lx襭2`ɼ{ID Qn( 3 +Y`#T'<4l5 `P5|T=iZC3ʙE;fڙKB]w㬃4Y1M㻳OvGʃ&c` v;z' (p֒IErAHǏiUGg- gI& `x4_S.LHe4Blp)&TUQB8pTUH*@blt7OB=\*gwآ`MkU!f>TW 8LeqzP J8Y2 W`ɩۛ"sZ悁s&Y50 
2en@H$L'V|u;vh6!Օ&}I\Pa9v>V^(QE5el*m{>eD eAl u=wP$s/B`Wb&~Cx pMЭG?.{IENDB`$$If!vh55#v:V l054DdF  C "A doc2b4J*GPNsjGn4J*GPNsPNG  IHDR@@PLTE)))999RRRsss{{{99RRss{{9j^YbKGDH cmPPJCmp0712HsOIDATXGVv -`icvf<z7oۧ\Ķ۞\9*q4H>ABH˂8`~Y `9?DJ0N\# bY/k3| 54d\{79FJWzkGw#* ؓpdnQRR!GfgGA]-f Lx襭2`ɼ{ID Qn( 3 +Y`#T'<4l5 `P5|T=iZC3ʙE;fڙKB]w㬃4Y1M㻳OvGʃ&c` v;z' (p֒IErAHǏiUGg- gI& `x4_S.LHe4Blp)&TUQB8pTUH*@blt7OB=\*gwآ`MkU!f>TW 8LeqzP J8Y2 W`ɩۛ"sZ悁s&Y50 2en@H$L'V|u;vh6!Օ&}I\Pa9v>V^(QE5el*m{>eD eAl u=wP$s/B`Wb&~Cx pMЭG?.{IENDB`DdF   C "A  doc2b4J*GPNsGn4J*GPNsPNG  IHDR@@PLTE)))999RRRsss{{{99RRss{{9j^YbKGDH cmPPJCmp0712HsOIDATXGVv -`icvf<z7oۧ\Ķ۞\9*q4H>ABH˂8`~Y `9?DJ0N\# bY/k3| 54d\{79FJWzkGw#* ؓpdnQRR!GfgGA]-f Lx襭2`ɼ{ID Qn( 3 +Y`#T'<4l5 `P5|T=iZC3ʙE;fڙKB]w㬃4Y1M㻳OvGʃ&c` v;z' (p֒IErAHǏiUGg- gI& `x4_S.LHe4Blp)&TUQB8pTUH*@blt7OB=\*gwآ`MkU!f>TW 8LeqzP J8Y2 W`ɩۛ"sZ悁s&Y50 2en@H$L'V|u;vh6!Օ&}I\Pa9v>V^(QE5el*m{>eD eAl u=wP$s/B`Wb&~Cx pMЭG?.{IENDB`$$If!vh55#v:V l054DdF   C "A  doc2 b4J*GPNsGn4J*GPNsPNG  IHDR@@PLTE)))999RRRsss{{{99RRss{{9j^YbKGDH cmPPJCmp0712HsOIDATXGVv -`icvf<z7oۧ\Ķ۞\9*q4H>ABH˂8`~Y `9?DJ0N\# bY/k3| 54d\{79FJWzkGw#* ؓpdnQRR!GfgGA]-f Lx襭2`ɼ{ID Qn( 3 +Y`#T'<4l5 `P5|T=iZC3ʙE;fڙKB]w㬃4Y1M㻳OvGʃ&c` v;z' (p֒IErAHǏiUGg- gI& `x4_S.LHe4Blp)&TUQB8pTUH*@blt7OB=\*gwآ`MkU!f>TW 8LeqzP J8Y2 W`ɩۛ"sZ悁s&Y50 2en@H$L'V|u;vh6!Օ&}I\Pa9v>V^(QE5el*m{>eD eAl u=wP$s/B`Wb&~Cx pMЭG?.{IENDB`DdF   C "A  doc2 b4J*GPNsGn4J*GPNsPNG  IHDR@@PLTE)))999RRRsss{{{99RRss{{9j^YbKGDH cmPPJCmp0712HsOIDATXGVv -`icvf<z7oۧ\Ķ۞\9*q4H>ABH˂8`~Y `9?DJ0N\# bY/k3| 54d\{79FJWzkGw#* ؓpdnQRR!GfgGA]-f Lx襭2`ɼ{ID Qn( 3 +Y`#T'<4l5 `P5|T=iZC3ʙE;fڙKB]w㬃4Y1M㻳OvGʃ&c` v;z' (p֒IErAHǏiUGg- gI& `x4_S.LHe4Blp)&TUQB8pTUH*@blt7OB=\*gwآ`MkU!f>TW 8LeqzP J8Y2 W`ɩۛ"sZ悁s&Y50 2en@H$L'V|u;vh6!Օ&}I\Pa9v>V^(QE5el*m{>eD eAl u=wP$s/B`Wb&~Cx pMЭG?.{IENDB`$$If!vh55#v:V l054DyK _4.2.5_Formal_InspectionsDyK _4.2.2.2__Hazardous$$If!vh5$5#v$#v:V l4 0      +5$54f4T$$If!vh5$5q5S 5#v$#vq#vS #v:V l4\0      +5$5q5S 54f4T$$If!vh5$5q5S 5#v$#vq#vS #v:V l40      5$5q5S 5/ 
4f4T$$If!vh5$5q5S 5#v$#vq#vS #v:V l40      5$5q5S 54f4T$$If!vh5$5q5S 5#v$#vq#vS #v:V l40      5$5q5S 54f4T$$If!vh5$5q5S 5#v$#vq#vS #v:V l40      5$5q5S 54f4T$$If!vh5$5q5S 5#v$#vq#vS #v:V l40      5$5q5S 54f4T$$If!vh5$5q5S 5#v$#vq#vS #v:V l40      5$5q5S 54f4T$$If!vh5$5q5S 5#v$#vq#vS #v:V l40      5$5q5S 54f4T$$If!vh5$5q5S 5#v$#vq#vS #v:V l40      5$5q5S 54f4T$$If!vh5$5q5S 5#v$#vq#vS #v:V l40      5$5q5S 54f4T$$If!vh5$5q5S 5#v$#vq#vS #v:V l40      5$5q5S 54f4T$$If!vh5$5q5S 5#v$#vq#vS #v:V l40      5$5q5S 54f4TP$$If!vh5#v:V l54Tf$$If!vh55#v#v:V l554Tf$$If!vh55#v#v:V l554Tf$$If!vh55#v#v:V l554Tf$$If!vh55#v#v:V l554TDyK  _5.1.2_Requirements_CriticalityDyK _2.3.1.2_Risk_LevelsDdF   C "A  doc2 b4J*GPNs 1Gn4J*GPNsPNG  IHDR@@PLTE)))999RRRsss{{{99RRss{{9j^YbKGDH cmPPJCmp0712HsOIDATXGVv -`icvf<z7oۧ\Ķ۞\9*q4H>ABH˂8`~Y `9?DJ0N\# bY/k3| 54d\{79FJWzkGw#* ؓpdnQRR!GfgGA]-f Lx襭2`ɼ{ID Qn( 3 +Y`#T'<4l5 `P5|T=iZC3ʙE;fڙKB]w㬃4Y1M㻳OvGʃ&c` v;z' (p֒IErAHǏiUGg- gI& `x4_S.LHe4Blp)&TUQB8pTUH*@blt7OB=\*gwآ`MkU!f>TW 8LeqzP J8Y2 W`ɩۛ"sZ悁s&Y50 2en@H$L'V|u;vh6!Օ&}I\Pa9v>V^(QE5el*m{>eD eAl u=wP$s/B`Wb&~Cx pMЭG?.{IENDB`DdF   C "A  doc2 b4J*GPNs9Gn4J*GPNsPNG  IHDR@@PLTE)))999RRRsss{{{99RRss{{9j^YbKGDH cmPPJCmp0712HsOIDATXGVv -`icvf<z7oۧ\Ķ۞\9*q4H>ABH˂8`~Y `9?DJ0N\# bY/k3| 54d\{79FJWzkGw#* ؓpdnQRR!GfgGA]-f Lx襭2`ɼ{ID Qn( 3 +Y`#T'<4l5 `P5|T=iZC3ʙE;fڙKB]w㬃4Y1M㻳OvGʃ&c` v;z' (p֒IErAHǏiUGg- gI& `x4_S.LHe4Blp)&TUQB8pTUH*@blt7OB=\*gwآ`MkU!f>TW 8LeqzP J8Y2 W`ɩۛ"sZ悁s&Y50 2en@H$L'V|u;vh6!Օ&}I\Pa9v>V^(QE5el*m{>eD eAl u=wP$s/B`Wb&~Cx pMЭG?.{IENDB`$$If!vh5[5=#v[#v=:V l05[5=4DdF  C "A doc2 b4J*GPNsBGn4J*GPNsPNG  IHDR@@PLTE)))999RRRsss{{{99RRss{{9j^YbKGDH cmPPJCmp0712HsOIDATXGVv -`icvf<z7oۧ\Ķ۞\9*q4H>ABH˂8`~Y `9?DJ0N\# bY/k3| 54d\{79FJWzkGw#* ؓpdnQRR!GfgGA]-f Lx襭2`ɼ{ID Qn( 3 +Y`#T'<4l5 `P5|T=iZC3ʙE;fڙKB]w㬃4Y1M㻳OvGʃ&c` v;z' (p֒IErAHǏiUGg- gI& `x4_S.LHe4Blp)&TUQB8pTUH*@blt7OB=\*gwآ`MkU!f>TW 8LeqzP J8Y2 W`ɩۛ"sZ悁s&Y50 2en@H$L'V|u;vh6!Օ&}I\Pa9v>V^(QE5el*m{>eD eAl u=wP$s/B`Wb&~Cx pMЭG?.{IENDB`DdF  C "A 
doc2b4J*GPNsLKGn4J*GPNsPNG  IHDR@@PLTE)))999RRRsss{{{99RRss{{9j^YbKGDH cmPPJCmp0712HsOIDATXGVv -`icvf<z7oۧ\Ķ۞\9*q4H>ABH˂8`~Y `9?DJ0N\# bY/k3| 54d\{79FJWzkGw#* ؓpdnQRR!GfgGA]-f Lx襭2`ɼ{ID Qn( 3 +Y`#T'<4l5 `P5|T=iZC3ʙE;fڙKB]w㬃4Y1M㻳OvGʃ&c` v;z' (p֒IErAHǏiUGg- gI& `x4_S.LHe4Blp)&TUQB8pTUH*@blt7OB=\*gwآ`MkU!f>TW 8LeqzP J8Y2 W`ɩۛ"sZ悁s&Y50 2en@H$L'V|u;vh6!Օ&}I\Pa9v>V^(QE5el*m{>eD eAl u=wP$s/B`Wb&~Cx pMЭG?.{IENDB`$$If!vh5[5=#v[#v=:V l05[5=4DdF  C "A doc2b4J*GPNsnTGn4J*GPNsPNG  IHDR@@PLTE)))999RRRsss{{{99RRss{{9j^YbKGDH cmPPJCmp0712HsOIDATXGVv -`icvf<z7oۧ\Ķ۞\9*q4H>ABH˂8`~Y `9?DJ0N\# bY/k3| 54d\{79FJWzkGw#* ؓpdnQRR!GfgGA]-f Lx襭2`ɼ{ID Qn( 3 +Y`#T'<4l5 `P5|T=iZC3ʙE;fڙKB]w㬃4Y1M㻳OvGʃ&c` v;z' (p֒IErAHǏiUGg- gI& `x4_S.LHe4Blp)&TUQB8pTUH*@blt7OB=\*gwآ`MkU!f>TW 8LeqzP J8Y2 W`ɩۛ"sZ悁s&Y50 2en@H$L'V|u;vh6!Օ&}I\Pa9v>V^(QE5el*m{>eD eAl u=wP$s/B`Wb&~Cx pMЭG?.{IENDB`DdF  C "A doc2b4J*GPNs\Gn4J*GPNsPNG  IHDR@@PLTE)))999RRRsss{{{99RRss{{9j^YbKGDH cmPPJCmp0712HsOIDATXGVv -`icvf<z7oۧ\Ķ۞\9*q4H>ABH˂8`~Y `9?DJ0N\# bY/k3| 54d\{79FJWzkGw#* ؓpdnQRR!GfgGA]-f Lx襭2`ɼ{ID Qn( 3 +Y`#T'<4l5 `P5|T=iZC3ʙE;fڙKB]w㬃4Y1M㻳OvGʃ&c` v;z' (p֒IErAHǏiUGg- gI& `x4_S.LHe4Blp)&TUQB8pTUH*@blt7OB=\*gwآ`MkU!f>TW 8LeqzP J8Y2 W`ɩۛ"sZ悁s&Y50 2en@H$L'V|u;vh6!Օ&}I\Pa9v>V^(QE5el*m{>eD eAl u=wP$s/B`Wb&~Cx pMЭG?.{IENDB`$$If!vh5[5=#v[#v=:V l05[5=4DdF  C "A doc2b4J*GPNs fGn4J*GPNsPNG  IHDR@@PLTE)))999RRRsss{{{99RRss{{9j^YbKGDH cmPPJCmp0712HsOIDATXGVv -`icvf<z7oۧ\Ķ۞\9*q4H>ABH˂8`~Y `9?DJ0N\# bY/k3| 54d\{79FJWzkGw#* ؓpdnQRR!GfgGA]-f Lx襭2`ɼ{ID Qn( 3 +Y`#T'<4l5 `P5|T=iZC3ʙE;fڙKB]w㬃4Y1M㻳OvGʃ&c` v;z' (p֒IErAHǏiUGg- gI& `x4_S.LHe4Blp)&TUQB8pTUH*@blt7OB=\*gwآ`MkU!f>TW 8LeqzP J8Y2 W`ɩۛ"sZ悁s&Y50 2en@H$L'V|u;vh6!Օ&}I\Pa9v>V^(QE5el*m{>eD eAl u=wP$s/B`Wb&~Cx pMЭG?.{IENDB`DdF  C "A doc2b4J*GPNsnGn4J*GPNsPNG  IHDR@@PLTE)))999RRRsss{{{99RRss{{9j^YbKGDH cmPPJCmp0712HsOIDATXGVv -`icvf<z7oۧ\Ķ۞\9*q4H>ABH˂8`~Y `9?DJ0N\# bY/k3| 54d\{79FJWzkGw#* ؓpdnQRR!GfgGA]-f Lx襭2`ɼ{ID Qn( 3 +Y`#T'<4l5 `P5|T=iZC3ʙE;fڙKB]w㬃4Y1M㻳OvGʃ&c` v;z' (p֒IErAHǏiUGg- gI& 
`x4_S.LHe4Blp)&TUQB8pTUH*@blt7OB=\*gwآ`MkU!f>TW 8LeqzP J8Y2 W`ɩۛ"sZ悁s&Y50 2en@H$L'V|u;vh6!Օ&}I\Pa9v>V^(QE5el*m{>eD eAl u=wP$s/B`Wb&~Cx pMЭG?.{IENDB`$$If!vh5[5=#v[#v=:V l05[5=4DyK _4.2.6_Formal_InspectionsDyK _4.2_Software_RequirementsqDyK table42DyK _APPENDIX_B_SoftwareDyK _4.3_Architectural_DesignDyK _5.2_Architectural_DesignDyK _4.2.1_Development_ofDyK _5.1_Software_SafetyDyK  _5.1.2_Requirements_Criticality$$If!vh585x#v8#vx:V l0585x4TDyK _2.3_Preliminary_Hazard$$If!vh585x#v8#vx:V l0585x4TDyK _2.5_Software_Subsystem$$If!vh585x#v8#vx:V l0585x4TDyK _5.1_Software_Safety$$If!vh585x#v8#vx:V l0585x4TDyK  _5.1.2_Requirements_CriticalityDyK _5.2.1_Update_CriticalityqDyK table22DyK _4.2.6_Formal_InspectionsDyK _4.2.2.3__HazardousDyK _5.1.4_Formal_InspectionsDyK _3.2.3.3_Tailoring_theDyK _5.1.5_Timing,_ThroughputDyK _5.1.6_Software_FaultDyK yK ,http://www.ifpug.org/DyK _5.3.8.1_Function_PointsDyK _6._Programming_Languages_1DyK _4.2.3.1_Object_OrientedDyK _4.2.4_Formal_MethodsyDyK  _APPENDIX_DDyK _APPENDIX_C_SoftwareDyK _5.4.5_Update_DesignDyK _5.3.4_Design_Constraint}DyK _5.4.9__FinalDyK _4.2.6_Formal_InspectionsDyK _E.4_Checklist_of_1DyK _4.6__SoftwareDyK _4.2.6_Formal_Inspections-DyK .http://rac.iitri.org/DATA/RMST/rel_model.htmlyK \http://rac.iitri.org/DATA/RMST/rel_model.htmlDyK http://www.meadep.com/yK .http://www.meadep.com/1DyK /http://www.icaen.uiowa.edu/~ankusiak/reli.htmlyK ^http://www.icaen.uiowa.edu/~ankusiak/reli.htmlDyK 'http://www.cs.colostate.edu/~cs630/rh/yK Nhttp://www.cs.colostate.edu/~cs630/rh/9DyK 1http://techreports.jpl.nasa.gov/1993/93-1886.pdfyK bhttp://techreports.jpl.nasa.gov/1993/93-1886.pdfeDyK <http://www.stsc.hill.af.mil/crossTalk/1995/feb/Reliable.aspyK xhttp://www.stsc.hill.af.mil/crossTalk/1995/feb/Reliable.aspDyK Ghttp://satc.gsfc.nasa.gov/suport/software_metrics_and_reliability.htmlyK http://satc.gsfc.nasa.gov/suport/software_metrics_and_reliability.html=DyK 2http://www.cse.cuhk.edu.hk/~lyu/book/reliability/yK 
dhttp://www.cse.cuhk.edu.hk/~lyu/book/reliability/DyK _4.6__SoftwareaDyK ;http://www.chillarege.com/authwork/TestingBestPractice.pdfyK vhttp://www.chillarege.com/authwork/TestingBestPractice.pdfDyK http://www.io.com/~wazmo/qa/yK :http://www.io.com/~wazmo/qa/DyK http://www.sqatest.comyK 0http://www.sqatest.com/DyK yK >http://www.softwareqatest.com/DyK _4.8_Software_OperationsDyK http://www.softwareqatest.comyK >http://www.softwareqatest.com/DyK bhttp://www.cs.hmc.edu/tech_docs/qref/rational/DevelopmentStudioUNIX.1.1/docs/html/rup_ada/ada.htmyK http://www.cs.hmc.edu/tech_docs/qref/rational/DevelopmentStudioUNIX.1.1/docs/html/rup_ada/ada.htmDyK http://wombat.doc.ic.ac.uk/yK 8http://wombat.doc.ic.ac.uk/DyK )http://www.ddci.com/products/SCORoot.htmyK Rhttp://www.ddci.com/products/SCORoot.htmDyK yK 4http://www.windriver.com/DyK yK *http://www.enea.com/DyK 'http://www.mentor.com/embedded/vrtxos/yK Nhttp://www.mentor.com/embedded/vrtxos/DyK yK nhttp://www.windriver.com/products/html/psosystem3.htmlDyK yK (http://www.qnx.com/DyK yK (http://www.cmx.com/DyK yK hhttp://www.microware.com/Products/Software/OS9.htmlDyK http://www.kadak.com/yK ,http://www.kadak.com/DyK yK dhttp://www.lynuxworks.com/products/whatislos.htmlDyK 'http://www.mentor.com/embedded/vrtxos/yK Nhttp://www.mentor.com/embedded/vrtxos/DyK yK ,http://www.rtems.com/DyK http://www.linux.orgyK ,http://www.linux.org/DyK yK >http://www.embedded-linux.org/DyK http://www.microsoft.comyK 4http://www.microsoft.com/DyK http://www.microsoft.comyK 4http://www.microsoft.com/DyK http://vic.lerc.nasa.gov/yK 4http://vic.lerc.nasa.gov/DyK _6.11_Distributed_ComputingDyK yK ~http://msdn.microsoft.com/workshop/author/script/weberrors.aspDyK Hhttp://www.soft.com/eValid/Technology/White.Papers/website.testing.htmlyK http://www.soft.com/eValid/Technology/White.Papers/website.testing.htmlDyK $http://www.dbmsmag.com/9707i03.htmlyK Hhttp://www.dbmsmag.com/9707i03.html DyK &http://solo.dc3.com/white/wsperf.htmlyK 
Lhttp://solo.dc3.com/white/wsperf.htmlDyK !http://www.pantos.org/35317.htmlyK Bhttp://www.pantos.org/35317.htmlDyK _6.14.3_Case_StudyDyK http://www.its.dot.gov/yK 0http://www.its.dot.gov/}DyK _4.2.4__ModelDyK _6.14.3.2_Testing_andDyK _4.2.3_Formal_MethodsDyK _7.1_Off-the-Shelf_SoftwareDyK #_7.2_Contractor-developed_SoftwareDyK _6.10__OperatingDyK _E.1_Checklist_forDyK _5._SOFTWARE_SAFETY_2DyK _E.1_Checklist_forDyK _7.1.4_Who_TestsDyK _7.1.2.3_Adding_newDyK _7.1.2.4_Dealing_withDyK _4._SAFETY_CRITICAL_1DyK _5._SOFTWARE_SAFETY_2DyK _3.2.3.3_Tailoring_theDyK _7.2.2_Monitoring_ContractorDyK *http://www.esconline.com/98fallpapers.htmyK Thttp://www.esconline.com/98fallpapers.htmDyK yK http://www.serc.net/TechReports/abstracts/catagory/Reliability.htmlDyK yK \http://www.embedded.com/98/9812/9812feat2.htmDyK yK `http://www.embedded.com/2000/0009/0009feat4.htmDyK yK Nhttp://www.embedded.com/98/9803fe3.htmDyK yK `http://www.embedded.com/1999/9910/9910feat1.htmDyK yK `http://www.embedded.com/1999/9911/9911feat2.htmmDyK >http://www.isdmag.com/design/embeddedtools/embeddedtools.htmlyK |http://www.isdmag.com/design/embeddedtools/embeddedtools.htmlDyK yK http://www.sei.cmu.edu/publications/documents/90.reports/90.tr.011.htmlDyK "http://www.optimagic.com/faq.htmlyK Dhttp://www.optimagic.com/faq.htmlDyK yK Zhttp://www.embedded.com/1999/9906/9906sr.htmDyK yK fhttp://www.sciam.com/0697issue/0697villasenor.htmlDyK yK `http://www.embedded.com/2000/0011/0011feat5.htm-DyK .http://www.embedded.com/1999/9911/9911ia2.htmyK \http://www.embedded.com/1999/9911/9911ia2.htmDyK yK lhttp://www.devicelink.com/mddi/archive/99/01/013.htmlDyK 'http://arti.vub.ac.be/~cyrano/AUTOSYS/yK Nhttp://arti.vub.ac.be/~cyrano/AUTOSYS/DyK yK xhttp://www.stsc.hill.af.mil/crosstalk/2000/jan/fischman.asp DyK %http://www.fda.gov/cdrh/ode/1252.pdfyK Jhttp://www.fda.gov/cdrh/ode/1252.pdfDyK yK bhttp://www-energy.llnl.gov/FESSP/CSRC/122246.pdfaDyK ;http://www.stsc.hill.af.mil/crosstalk/1998/apr/simplex.aspyK 
vhttp://www.stsc.hill.af.mil/crosstalk/1998/apr/simplex.aspaDyK ;http://www.esrin.esa.it/tidc/Press/Press96/ariane5rep.htmlyK vhttp://www.esrin.esa.it/tidc/Press/Press96/ariane5rep.htmlDyK $http://www.sohar.com/J1030/appb.htmyK Hhttp://www.sohar.com/J1030/appb.htmDyK yK Lhttp://standards.nasa.gov/sitemap.htmDyK yK Rhttp://llis.nasa.gov/llis/llis/main.htmlDyK yK Lhttp://standards.nasa.gov/sitemap.htmDyK http://nodis.hq.nasa.gov/yK 4http://nodis.hq.nasa.gov/]DyK :http://www.hq.nasa.gov/office/hqlibrary/books/nasadoc.htmyK thttp://www.hq.nasa.gov/office/hqlibrary/books/nasadoc.htmYDyK 9http://iss-www.jsc.nasa.gov:1532/palsagnt/plsql/palshomeyK rhttp://iss-www.jsc.nasa.gov:1532/palsagnt/plsql/palshome!DyK +http://atb-www.larc.nasa.gov/fm/index.htmlyK Vhttp://atb-www.larc.nasa.gov/fm/index.htmlDyK http://sel.gsfc.nasa.gov/yK 4http://sel.gsfc.nasa.gov/DyK (http://satc.gsfc.nasa.gov/homepage.htmlyK Phttp://satc.gsfc.nasa.gov/homepage.htmlDyK http://www.ivv.nasa.gov/yK 2http://www.ivv.nasa.gov/DyK $http://swg.jpl.nasa.gov/index.shtmlyK Hhttp://swg.jpl.nasa.gov/index.shtmlDyK http://www.swebok.org/yK .http://www.swebok.org/1DyK /http://www.totalmetrics.com/resource/links.htmyK ^http://www.totalmetrics.com/resource/links.htm)DyK -http://www.methods-tools.com/html/tools.htmlyK Zhttp://www.methods-tools.com/html/tools.htmlDyK (http://www.cmpcmm.com/cc/standards.htmlyK Phttp://www.cmpcmm.com/cc/standards.html DyK %http://sunnyday.mit.edu/safety-club/yK Jhttp://sunnyday.mit.edu/safety-club/!DyK +http://archive.comlab.ox.ac.uk/safety.htmlyK Vhttp://archive.comlab.ox.ac.uk/safety.html1DyK /http://hissa.ncsl.nist.gov/publications/sp223/yK ^http://hissa.ncsl.nist.gov/publications/sp223/iDyK =http://www.cera2.com/WebID/realtime/safety/blank/org/a-z.htmyK zhttp://www.cera2.com/WebID/realtime/safety/blank/org/a-z.htm DyK %http://www.ssq.org/welcome_main.htmlyK Jhttp://www.ssq.org/welcome_main.htmlDyK http://www.io.com/~wazmo/qa/yK :http://www.io.com/~wazmo/qa/!DyK 
+http://www.fda.gov/cdrh/comp/swareval.htmlyK Vhttp://www.fda.gov/cdrh/comp/swareval.htmlDyK *http://www.testingstuff.com/testing2.htmlyK Thttp://www.testingstuff.com/testing2.htmlDyK http://www.softwareqatest.com/yK >http://www.softwareqatest.com/UDyK 8http://www.testworks.com/Institute/HotList/index.9.htmlyK phttp://www.testworks.com/Institute/HotList/index.9.htmlDyK 'http://www.testingcraft.com/index.htmlyK Nhttp://www.testingcraft.com/index.html=DyK 2http://www.construx.com/survivalguide/chapter.htmyK dhttp://www.construx.com/survivalguide/chapter.htmDyK  http://www.construx.com/doc.htmyK @http://www.construx.com/doc.htm%DyK ,http://www.pogner.demon.co.uk/mil_498/6.htmyK Xhttp://www.pogner.demon.co.uk/mil_498/6.htmDyK http://manta.cs.vt.edu/ase/yK 8http://manta.cs.vt.edu/ase/mDyK >http://www.qucis.queensu.ca/Software-Engineering/reading.htmlyK |http://www.qucis.queensu.ca/Software-Engineering/reading.htmliDyK =http://www.caip.rutgers.edu/~marsic/Teaching/ISE-online.htmlyK zhttp://www.caip.rutgers.edu/~marsic/Teaching/ISE-online.htmlQDyK 7http://www.cc.gatech.edu/computing/SW_Eng/hotlist.htmlyK nhttp://www.cc.gatech.edu/computing/SW_Eng/hotlist.htmlMDyK 6http://www.enteract.com/~bradapp/links/swe-links.htmlyK lhttp://www.enteract.com/~bradapp/links/swe-links.html9DyK 1http://www.bmpcoe.org/guideline/books/index.htmlyK bhttp://www.bmpcoe.org/guideline/books/index.htmlDyK yK 2http://www.embedded.com/DyK http://www.ganssle.com/yK 0http://www.ganssle.com/;DdSm g/=0  # AbǑWIrk1&8PnǑWIrk1&8PNG  IHDR gAMA|Q pHYs.#.#x?vIDATxM5e8E ]d`0 ʂj iaXyaoc6E h=UWςƽfcs][P[lRg90[z+8u*s9d~߉̈T6jQ,^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH 
^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^\M_|[_ %RL¶?[mBxZR1խBxZ(iv%=ɨέo:r-x;r|[kW>_GZ>8»H\YۿOFx'`k}yxeDx$^.«VJ/]Gt$ /@"yxPHV8l(Dx" ^/B@! P(Dx$›J ^DxHtU$ /@"[5/@nyn25JEUkeگ5pkH֛u徴JCqk`LvWwa(7Gq+V媗 k 0-m_X5C!Z-ԎJ݃ .0:S%#^Ftnۼ%m~~g?fx?>$'x49((1SO3tԍ]gaդ/"? G*~VѺ&d xG=dxYeJ/ށWY GJW#R3g*$x!65#m[\ h3w#'+Fk32Flnu/źҬG.TڿxMEը*x3ͧy;2*xY>h^6׼z!x肜SkgCy'g3xS_|R`^:8Eex]/^< :xFiR)XmVcޛSv1M!Pҕ^?qlneFFs}d$@y^28EsxsIEKW30uW#Oë0xqx#+HAK1WC0F4 4/ ^êErܶ>$lhO!>5_1>3/.JW[+|82(F |݅1ame&߅TyaxA᥈kj$ iU"> -~G^RN;Dpic,O&ë.{t6< 0rax_S#Ѡ{4;ǎgv7 6ÓO2b<}xP2}a=O5o^*Cqoq:xs^P/in)x{%;4,f dP({6;pR{K^?crف x6zJȅ 2dIv.na.^=K; <›*n5l~^96uYR-U 9Sx Cr8V*׫Ax?x52FxMJ~M#O#[v8)7N?<8PbqI"ex%s7'bK!Ѽ#zu`}Sj!4#'!);Q[g52TE1.O/Vn2tNVU-M^ xUG(9b^{ u:7GEh+ ř7K.{QxzTdg4*W^N-.?x *SExǟfJ^yC#h^<,X4Q7>go[>`ꭃGEO=$CQ^UGd{ر*"Ok; F\K= &xԩVJw|G7WSpAf  IWޚQy)|\4/mP_*s4_CG(eXQlVv> x'0N< c Fx&qqi2ӰhʖU&Kxw93*̱:DxG%}S[M=(yiG΢n#< ^g{p/;u:^u< =C:ה?oSg㑧^kf ]x17O+q/[m5&ӫqV-+lj}6BK^SuЊx_yWWi$"<:h~/igF)Zv WB[9j0+pQhKw}Z:8ljRaӒJϊ2y^DxW'.z5/@"$Z$ /@b/$ /@"$ H ^Dx FDxmDxH ^Dxr)V_^.(A7KV>" ߏJo쟿ٙXh;Bx{Exat^[Vn}/ގt뢍/Un>Ex[Hkik-/HַySx$ /@"$ /@"$ /@"$ /@"$ /@"$ /@"$ /@"$ i ^w̲S6w1vr|Td xf =lOaf{]ps^3V5oA ~OO~q 'k<Ҹ MDH69O?и PxzoAV?Nt ~ }mvqLzg>vK Wвa<@xa&vH-k(0xO>ʤﰎu2;2=ʤgLnA x=Hl ^}ln{??ic c!ڣǰ!qw1,F뇰eb> w cXxs"*9eG0Nяyp'GCNۄ0=v={-ܽ^Q+˺[Sw%0~^:x M{+-sBK^oaV[R&; Ma/1EX:x { o^XK^[:-_؊.ʯU"Yo{KUtwXoh\<[sX% xwǕj[CY*-@4Mǩh2s&[խuyo8j}!g|$Nњ֤_[{ŗ\^oήd*e;+8MUUW{|nmͭ]Y<}))5W;5=!x )kkSҽ.:-D x*NvezW{֩jzOi} +^Jk/s+.m$ٕ oSUwc8x^%6ȆFLHuJ长 L`o 9./[t<6 rG8:ũ9<|BJ)l1>uJ*޲M|CpfxΗv/sjG]㸹nsi>4V;z̟{KʂZ:Z.yex甔ێ:-+܌&n)q21r8OW\ '<:˽ܯ6axOTICσiht]x&)3EDMe^᥀5^Ns mfO1<7I@zYSx|F ?#H/=LK+;>kBt Qk ^u^๷vv D@F!19Q?GSk^8.p%lsgsGu^B,Y -7+u85UpuΩWsEZ2ҫb_^E1< x K۶_][ix£I{ H#k+1U 0<êVU<{q Kg+Gx2qWAQz<Ýc$ϒ!xntbhCx%«^>I>L1)iҚqx ѷa5.O<^ͯI _7^B/c&!S;~l]x4]E{z<,<<֤d=<9&6<- 3ޮPY7ZAs5)qLaI1xÊ཯Rt3{p#<.bm9!D8W| *rx +xfό3`p>Gs0*GKu:,tmxߠKzRwn!JKهk~ILFm(':4«k12o0t1˞R+;o5As^WpAG'ȯ=wϽ Wyx6 \:/ 8u) w]xfغG ^jj.F٦Kmϻ29@qK.Ipn~9s܄xu0S0xXh0Ԫ8-~#a WJƀ=A6+4I$ٙ,S~w1i_n ]H>O <^=~7c4@ä(=

*'o+=h< p«axl3sޛD6y5ްm3'Crg. $妢ZL[#BH^C2 ëP/w#\T^٦x|43YOWl [GKE,H7#gË1“X]b\Ҳ UY=xtk*EХLߠr}]4wij(>elBA?}x2^"Zkgϻ.<[û- ^͊)r *PckI~.W @>uJ2]W^oz':g_|idC౿G(ǬƔ7Aq <q3B_L9h&7( }7nl"v2<6%;\IjIX9SgWRz.Ͷ8ZGYh5> ix}qzg!&>F=E-pnQK'wAM&Q iC[buiSV42MecxHRnl%rddWcaxPZg_ ߠ ~<Ϙ3 &]͎RYTa'xx b/Kg# ۗvއΤ2\[x懣*?JQ!xdam^2gcmDR2<{/^3 f" ]ʫ}AF']!.` / r:F2GMW9\)ma-u9~mO1bX xu*y܆?;=MvԂnûkJ+ \ <5K׈b'&*xbEMj S;u,{/ 4>]  x΀3<蕃˷-Mvg>:/^ˀWWNxmbfyԝwhްӉڇSVfrӨu m5 xãbxjVR ep9x^1&79 t:N5B> ڜ#x.TVW \E(.x|CC @''9ב\O'AQ0"SxazNMoWnx{SMUqf[3rY]reMW =k>02rTB 2C_t)Ғ;0l.[(-o #{JQQ.?c2^Dxr W.^DxH ^Dx y}5O;Und{,NҖ W8^834AØ苲M3UA4eO)傷{F=d苝10nC^<-6m{x5J'@^VX޾SeO +x3x-]~LxV|g9<$c)<)*VQyyM:gN U ^è(`);A_M.;@1bVvm^P0ԈQG (aqLgY(k9J>1X$:>CweNIO+5t iJhsT<4]Sc*  <~kix[Q>da[w /n>cq=E&8gO^nNڌpe4Yf˸8 #3ʙG&h2E{ͼxó*OʣxP.t*rue^y^zޓk,݁o(7xHD5)C^hoq uHi i^-%Q#!V.iyTCn'~i^?CWP;xI*^%(BseedxoࡶQFf۶[%6,ږkMB;~KK5% O=IesEmOƱ.)2E -BNO,x`0f2WrSj!lV<5Jyͫ`UǍ.qK1GaW d{6ۅW ,ࡑWN4/sjPV2GmX@|1.?$.Ig?OÃ"s0</U =ǀW'<|k[x *;l; *|Św0+ 'oB޻*_}^z2 J / r¹6q^=-'wsge^$x_Ixҟ0|N<aφ9ۅץ/im5:<<x<V$;2|V||J%͚kaq~-^暈m>/h?%sxx8^A]/i^ D -v^3?ہ{^[yv6e7Nq~-o5xtN9t=|D|<7Аf$$٠yKwb#:X]4d LR8WWZx'R!K 𲚺a<_xx^EX^>k^|[x<^͢Ξ#'Q\<4xCZC@9&S{hW֣W@x'JOǹ >)虢-0g$+^U fxpҴS|41Qcle(nSrnrb|;J ~ہW^7ʮ6x].ޗ_ ΕOLSk}"7H ^,-gcP++]k̜o_wrx % û-7w] /_^ڮxK쮷[ne-^Jxg2^tĖ1w*-Xխ者^__=uc%<}o<.{}-{ڝ/0e>ȝ/0[.u_LiTV[4wX oF"'侓jx߅땵(E땕%J w7ouM5=2n$hI-JC1ڔƲL.n_WgIT~K -u4[{@nSҺο֥u(V~MwRWVRzGeҡs]qkr+u+k,7rۧ.>pTSe;]tXz+stZ3xTk/xSXO3~cOפy0r ;:)G%T;]5Q@Da䀗xxvlv}fdܻCOGOEҹKfV;` W <?k&𜽭<<:-nKUt^{5>^^vG?-ui5qn*ۻ't).| 1xGxOt^=?d|=4Ylи5?7 +ȎoNG=>A0iѓ҇LS q\ Oם v&3xOWx*iWRX,=#*rxbzx)ëI$yn79syvGYm![Cuxe^S^?g/a=dxhǶSH N/a^{5-rIf @]&a;wS)9?Gab%yg\-xX@6Kw%:- L&~k?/i0t<<<<6+U)`+C \ x3ṋ5.'׼~;GHӜ-[>5xd/UÓ  Ifx7Λ.8x-d=wq=-+=F0U/t#; ~SRS݇Wh 'Mo>2+u6.zϭ nH6^6C$^yS: {\'~VɰӴ8p]`sQmI(.g oKP܇W5#}J>Le:V !4qthBEJtpxt{x&6xë>TxBR1x _W[RֶFըB} xo2j7o?l=xB""gM]ak=< pI/ jW+ILGm O6~漺3ax,*ܝ݃1bṷK̇؃W(x5oNCIN2aP:,5 LAMAr /ax S2lSQDGx*|0ͦcY/ޓ >)Z]!D|uaJ( Mf >CH(ax7F .ax^ 9ɭ,|Zm)hOuHn𚼃B݇SSk9Unᦿ}j7f#Y K,*Oa%2dk'{u8 ;X[6߷u0cz[x _ɍ=2j1 
H5Ii2I'uCpt2{ywu >/ V"IgbtVQX gv xȌo$mbxma^#T^]YWWgxdyU퍊c0c'?} ^Fx>М$-usvI27dOrr+3+7W~5Egk%Cy[mh%_ z(Z\[Z Bx4<"N<3QyD؄ң)Qo7}Jia0hG\Qg;I4OsN2V{CyOx{-oA+Lmі!?.BaRMzj45b»]-ô72 QZ|GDG>Jm7=DxH /2Z$ /@"$Z Q$֚L)5x_:GO*N6ʕ?[3:*7<.v Ҵ 6TBR';zҡȍᡸ0PBZ#ʐu0[d!CP5@!]g$33i~HPjHr(+P7J%>d{j7_ 2װ1<$D^ j^DuȨUķh .#y*n%i&$Q R`Lbu.K׼ Cйc o ^yD!yZ%JHR %ا 2,ĖMWآ yئ<),.˯V/:Rll(r`>Ccw L=?UH%a( xY8 쌒pMܯ_ ZiբTX:NX'wx =Yފ|tQI^y7C\37XUفv=uGiax߃{xvOÝ޶֖Iya)a=Ox8/;ãn ë)KybDl -IoEH|`Y1Wg grߎW]Ѽ%G72Y3ʑ4SJusx8&#^Qy),zSf@%v<7FU^_g5p=&='L^2|'NSq^^--VrkQx;V}xc YV_e<>yP 3g ^3w?<#-sx__qʘ xOүm?0^zF9X( o쬍m & iObqf sx5),;);='::[- nC^&pQ<x7j E᷂Wo bx8禤t29@Q,Oƚx8RqHj^Fx 'vTh 5NxTuQlq_K7e3xxt-tt#k .eXt^f>{Mv^92 ,w6y;yxhpgö[Z^a&+3x=~/Ⱥ}xn֎g9=qg+<ڧ9ɼ25קOPH'yORܙPؓ ˳8!:dW؅m;3d8 x}^.1ICJ/ocיS֚o ÚCHJSxyFxU?y;|É>z.S@U\.51\<\) *;8+^+)0;ɰIʴ4dxM{/T_?hLa`x4]㐥; yR·+b#~Gxö +X R<;pCV>o%ەn|wEh݌IhgxWۆ^DxȫwUn)[O #g]iUx" jS#)dO?5yGyߪ_WGZfk?f7\ ,71ߗ\ {o"R/ c?O#W sRf {=#@Qun-W7lޖ=9^<;/@"$ /@"$ /@"$ /@"$ /@"$ /@"$ /@"$ /@"$ /@"$ /@"$ /@"d[x8-k`qRq/_6~UWggma5oGɦZX>[sʦ69%k =l .K*Cy[Ue^-9rC9z[ެC9z0G-霼[3ʖC9z+YF6|P^঴%oCy3ĭ$/y{rodCx8 !ɏmO5[#2r'ѨG nz$Go;x=R8t[<^;z_ޚŲF,pNn^@vhycv{(Go3x ^ 9zr67(9z鰦}fHޛ#m=V&3D7mo==ՍN&q7<^{ooml{Go#x3 蕷qlo8z}qSFf蕷qlo6=}Go#xs׎j A67m=lہG7Q=J%6vT{p69Gd6wP%ooܴey&c ַ&v-tZR*[_-z&bP =LOߪQho/k{ zF/ۋ~.t^5O59N[NRl9ooo/3֏Fe$7zEJg`fWޘ^&#x?W^雴@vzglOcztZF6vM^*jlϮ4 '?߁{ZJ<<0sHzF˪P<~ay^qzu69$WiWeGȨGihK3EG}^{YU߹-ImIMVг^Ľ`AXuI*k^ 'kiT\!^u+s/w;wNHQt]zxΕ)k7^; &x9Mx ng &}HҸ[m+Y<.u:Wj^QL95#om Iy{xG.羥p}%mQZy"VxX O^ ax+txG˶nDޑ{ w$,Q`WEyضEx{Nu#< u}"G+m %$;3']/etIṅ-e4_5:|H94G&pxG.9zl͚)ߍ~<F9t}s>iTFYFS9)O;x3C*V uڽwO%r=xONO[{xx y"oj"G/ً9xxK;M ɍJ{pոmUS.a/n u%7ۖhg>^޳H@ћ«|r#݃緜RQE{XI· :Q>>x=mGܳɰWwbp.q6^duoeHY`s.75%PjҾmB))(fuMx'’ް]e&IN%֛ / |~^. 
!6D<8?< ]މ6 Kōh;d;O][9irTjj3 0"jԹb;Vu8ͧ<%`x h_`p8z[)b: wo][ NdW Q&Ƨg֧*Ho٤iMN2V<[ ?wXmiҮ<{1/ ނoS"4%q 9PI;l%R㻎uʝ^4o_>"oXr[łj8d>*;ZqjlN{a%M⋗ow!3xRxұew#x2lcy#<2Ґ!0xU֢U-Hdj>rFBa+itGb_(RHAZ/gʼnOj xFDgkRo\ojdjdWHK<< EHy vnSKxLu xz^2G ,Md6PըkpU)0o¥ζk`YIrEIPӨxbGxOܮhqGXk&x-M.:xNNVy9 IPL[seX%xcooQIGyNA+ë 5tV l+R+Y.Dx|gj~^nYCm}V$ܺ%U6C0<{oQnz |=Ѫ=<.ҝc݅j "*x?gS>zEo0pϩ-5yO*GxXkyW\[oNJ0>DpOS{^' y͚9$:ȥS*Zzw<;F J/%M3;ʐ |NEUhܥ~yR Ct ,~@R#yNW.ۤ*]o$vp +8$x 9D#7*0 g[\P*Oi] k:&XuzxwIE9ݧ ɍ6~)yxrbCK7K -lڍP*rV2eH};.𪱉-#<[HHR>_XBxE 6 L؏2Go/}%;-VYx'Y9()ȖGU.U/i oYD[T,|˫bniY~RA8RQ5 h귰=TD{]RRr{h}=r--.b ASFk%p3cOb¯ׁw&znq tŨy6N)q#Ӝc)<:tia}u -F#3REm(+Rn# :Bts/M@Au"x*SEeȇ GxCz.Me?F~@i[!"hA wf\[͕6duS)WŬxNU,t-Ovg(^'[3#ZT8T@H Xsk[\=ã"#.:*f2|N!hJK)5E;4n|֭hNF.@P<0x\]J(. Wduqds\2ZZ1ׅtx :W>EJ&x*xxOMq=G Ɏs3/B9u\h {T U4 g.MZW+ =x+NfxǦ\} R *8!ysx)gleNU¶'sx:t<xHF-Jo=<) G2^3Ns6CIjQ{Iy%u89Η|D!ϟsO൅]ASo7&-:@u} Bvr=x'@] * ^ xC ŸȪ4Osy[>6PSKxǯ?IG#u{ H^F+ t' e S&݇.v̯1w:waxp|\IG#i^S#v13؇WOyO61Y%yT l[3W:x fsxN8z?6NuG/k>Y/}%&<<8e ̥[Ҽ6(eN Gk+azާhGcW2Ju>CsA'9E;s{oWsrWD*yCgqx"}{Jd G{;;MQi>),JQˈI`)@3x]xԕp^F'h1 ?OQ2;yΫFxn}xGe2<$SuFM^盪duBF< iuCϨU0lN;Z)#yCW^3<$>MxGc' :<-@Vܰ,V֣R߶ ĐNv(zͫZX5o|M&2hHzigcK^ᗸޑoە~Օ]v?Z"oɯrސWΚW']'s%DWNUcfz8:Ѕrm'ƌWsw{ mgkdr%27q Z:x?nR olCi#69uK>|6Ot x>3!Дh1cD8Kuf0Ȼ^ Q%=^8:i,d̞ 9 Ƨk;W+.#l,Lkyع$&AԟjIQvu>o1| LN5P0D(a. 
{ p}'~LT5cLUq a{p-Z;_hKS}"qsV_A/yi$SD'O, [;Ew(twqѻe^۝z{79d xnR;8z/H6?F7rӺ츇.^e^l-' } 7ox{"8" >, ]ex{cyۋ= =G1ܼm:zmog> nFv"P Ljmog>GAܼ&moQܼ꣸y[Gq6O~x7o+x磸y[C ,(nVf^y[{7o3x0nf&Faܼ÷mo==x7o3xaܼ0X`&pXyC+Eq6qܼu~{7oCx>!<Z^1l@^Ѿ<-Q#y*[CD<-!Hʖ0=P@jSxpS^+ )<ҺG Hm wm )s/n}C)[£st[sʦr6שr6gO^S^s$M[$3uf7>-.[/m >1hJ ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^DxH ^Dx-vW6n}K'tNc085GLj&oydxÀߔgMൂhnoDG采԰nV(w xTטּ|z3{s\fpRf͠<.l_Ew !C␩"py t/X璘kD%^}: Gw3cjel©mi߹=[8[~O66YK˨q7+jT [׸+>/^/%g}HzOS_Teu:*jh:%2x W!4eI~6B$j z|s&X+6_3x_wps{/cx JMSm5yGܸnO㼧k dڹ˫kNsdw4sxRl0R%X1-\;CZŦr~Vo L&5r_u?NiWwdiMj2<7}]ܚDʏN}Zbah(Q?"x^U|OKaړAc}z1 [^!LL9xsc0=SV%4֟<<{p0՘J^Cx9" ONkgGxWԼ9hZS=< [=a<-e< HrYRhsǧ0^%):/|c}^k3眑~xg>xgZ =zG~p g SzOH`8l3ƺE y^ԕwjl_(k.xZxxM$69A#xĥƶ) &Q%rɩ+[[}j#x_58Sx[vU </1] =xq; ʆ=^Fp-6OwY<7JӇKDѕðIdQ ^ /exdu NդR&ϠdZg(*t߯ZvWx.­rN++Fx_BUgk3gA U)*'&nx46Q]i!X3g2dkۅS/t24 &x Js^$('Z##d5 yΜ(ּ^f ~=^0ƕ%o}|h^౯H7x x5k9ᑏeFxƔ~^/kvk37׼DWw6̓3X4֡}Tj/a[2r6=暇 IL~9_6>fW}xMl&ozx"zё # 3xaxO&.viCsM<׈|[NxG ; w _W9#'ĐaQzk[ᙥtC{HH=sò8GU5~s;4Ûyo+ 쨊vHr;IFx4GSJt] ^4e?څ-[Izmmj 0)l8m ^^BC=Ne{+hqW2Io!LM3ڧ0vN9W+pxt|Sro mXZv lc^ 'xزS<`03TQ!:+ ^]b{aN"ʵODd:T\LCVt d靐 iJ2^pQOUk»wqx9|{0ofȀWW#+[n~G,W0ҩHg-QEςR~򒋼>5 K wW(0ex φQ%R+ynm=Ӽ*o>| ܒ3r_Q+ _Zz w]x^jb[b8?DW$&!k{TVtJz(t)62h tL{O"{> aᏠ@ot0Pf6j \|Эg}%0s03e+4UMTAC#v2lC^62q`i"Ċ)G+O-s-z?W[okg*48A\HoWV=,7Sx4})#P3x~ jw^+ۛ)Uk3N ^Bd7+ٙ8ݎ+ix|Y/ax3޺\kQxO;CrIcx"Na2 +pQ O3I udPQ⑇|0Bvh>x oJkk)ӠyﻃZsxzNzE}.q Q.2~4L:ƒ 狨ϰ=zx7;lMPmvQyUKd>hJ‹ՏȿВUSk:fSĀ옠^us< «AOv 9Z<<8G>E?af<~|Y۝*[}a ^=i ]KFx =b5~#\{/u|d;$sK*ODJ,vSQ}G:w`)<)^$yȲȏU{bMIF&3O2ggwqKV6-# oH~19vD  9l#V[Ts7G{38(U!z#JU\ru \}x'j*B/d<ԆB_^aN-Ooz+ 7re2#W% o W!|2ҁ7 2=!<d"6<}LUSxMI;xfxVmI@21r:d6Γ;:hU'U܊TQ)/hyFKt ] Uva˅rZHP%K=<4:'xÓ $$Jo|O&s2>i k[OBR\ifkv~\ou.< rLCRe+ ?xIй[;'}_/L<0<;m(<.W\k1 Auxβ6~JxJx;E~hë쒹?xG$¶$0QPxrb7y1Î3›?q#}^yNu^~,mS<MV/ü.W_#rRה3m.hÛCJ5MhMGUĔu sx= J<)<5!dt#TZXocǧIJ*==9^bAO&3lM\(G047C>tM?ʖKP Jd[AlΒ>qGaf7y NF +W5.-'nOZN7>ˬ,٠&S}ܥo9د$p<ExKXiBHwnS ^Dx/% /@"$ /@q# "WTnqAFAՄ/}CP" ZP(e7y9-wm+p-WQn΢[X`jbԟDK ^KLf|#,Z唷sB_ 
}xOI3|mP^%m<@zGAon'&wvRKmy^FNJ'4S]ɉd/,$b ^v}@ 棽K%)LYΩ猩PLzԟJS$?؂~XTo˅g%_VU{0.D$jiܘƳ|:cSNhvOz̬4m}93hWƚg؞Q?:ptLhxhI|9$~4Jë5ȭ%xMɹ; I[t58X,ۛE_y^rcSNFxk?=LWI&d)DqC&jC*xPfIX9xM5<[jdx{Hb:T9eq~kFxlm/Ekɹ P 3 kއP5bxz?S&ͅE5Npj>ـ5*z6 x\^^yG?3 L-yFrhaةP[m4&x *LSW>V*Z><51M7bvL/Z&m?>K|^cK}3%T|4eM=\f4Col I1<ӏmx}7jqmi9xTޔ Z yJ ^i[}.ӣpY/^mzpwRRLyL)n5Ӹ5] ڲRǔ熷[&kxg} x^:7M Pqb4~1bQ715Cf| څQԎ(Y hp4_4Ƨy{Wi>őX)<ou0< r[UINmҔ^i<TǺY[?^  T̪ G0z?^:[m8 /fTt O4gC/FѓfóGы5a kDex;9F'Q5s2XVh I)=G\\@. <7i nd3B^!_N_vQ:U*Hrf~6Ո cewY2[_ۮv7| ی2C`メmxs'٭KE& 1Ku5/~>loڀޑ^j}l]7]2r8oW0<鴌נ ^Rި<'k?sAd:y [:m˙j`r?7kߌ &?(6ŗdLC&ug=O`XL g<K!)IU7 ^k6ڮ .Lb8eG}ϗ \ Nt k-ъ _t9#CI!xZU>ܔON7 5 u!h d"KyXR49 sDG Rg^.i_9C X{6xfwHכHJ([NwTÐq7jN/7Y)!=ExKwe}W»WiG^ ܮX Y^l_jŚ:d%2ph{jw=doUQ57Oa_ݚQY \C~On_EuDV% 7+8?5-ൗUwόSkўp4B4!;]f>9ot=?|VsXEncl0.SxCBu -1b**%| xCDZQ뎞lUھ_< اWOF3xݢeѩ5NIa+x /sk$ .x>6}&f|(cxKd^69[2ߔU1 ȁڣ|^aު)wLػ5g[{B5njDqt.<깊& {,glx̸=1j%5ܵm*+VbGCHkGi!jugNZ^d1@U&x9“3(/<[x ?ד>M"UԿFxuZY+(˰-hB<4+m>I|<^çW"Y=B%u'Mo5,ht6Y31m)LJSWq]ջLxzx]ꆭhAßR JOZ3jAEMEs mm^R< kOϿˁS R |kfb҉TUE}]I-X[exF_<|+d#WTG2Óخn,?c Ãʯ =n+;)Tbറy}m:^gD .|UBvi$d xzijBwϼudbjAc ^e{-O7_my;{htujGA6 9컵' ޭoydۜoydCu]=l9gDzCn}O&2_G"$ /@"$ /@"$ /@"$ /@"$ /@"$ /@"$ /@"$ /@"$ /@"$ /@"$ /@"$ /@"$ /@6=NWȖl :M_Ʋ<߀-=lַ|zlvԱ>l<}B#-^Vt oPkhno8־lo<2־loԷq:t3moz~Io#xS`Lz Շ67<v#8zx̡#rTKw4^K0[=Mq{;fUu~{; ? 2¡+bs%ቈ{n}sזPx'f_b;yj[ܵ%qWXkK(_;ANN? l{~yO> Wo wr1;-'vw> wʢP 'JOgN@x'_܆+ONz#w> wY(SN<> wzVNəx/nqF}qᝒ3k{_܆;C';%g6 ޙI-;%gry=2#w@x?ީgW'|@xW}/AwvXyd Yp瑁0x\;_;Exǥ= xg_w>uKu瑁0xřDx1J ٢;I]ޝU#'wX%ނ@};oK;&o#ra x19ԵGT-+[CX+!NDRI[>w%Kkkj 7qu/XrwCukL%ނ xX> Q|U 1uHs>%9?AyxO{x cEj2FR-wt<232lC ̖>SGV~G?z==̚ _UC1dW3Op?督֓NrL{3[p=V1*`x)@t:<8) o}]/eHa5:xs"NJlJSڮh駮=^J~O_ŷ+W9X3+vB& Ia)DZ\xt.iR( LT1lZ?7J8?Z[c?rT.)MU;MV%T4Fy-l3|6nyx)]tL["bh{ӛ)CS; }dyx' xΣsT ^xvY6:y%t;OA:Z`&-iw0<\].<7PMnP.zc22 8"x^;k \_L>6;*tꝎՊI=19d]Ϲ;ÔH΋. 2%ox69s)Z<<1N2) i09xF=>)ù.}ƚI6]x;z+w˧X ^W<7EҵqAϫ\Ou0;J=P*3{{M5GO3%;;, Bd4ExzoWn7_rxƨ/[__O'|ZpGrqh1)cQ1O' -L:xˈݻ.) 
[xm~/gwω3xNZUxx ^qrM@@Uʼn>Y ;0P5$+ϊuʘHe!x1xmwy2gis/ǔES\Нbx% 2~UHœ=wϪLVcGm' xKZUW?Oh#xtG`U/vce}<Ȓà9׌ҼEQSa+2.~O`q*N8ixv(v^L{k17mθy='dg|3xr"(~L{>^rJSR=Q2KSG9IAh- $GT)4Mۜ^ _Je+lH$y|1u<<Pl 1?oǷ ņvA:we9a;Zn۷x6/D@^w$SfaRRW n8Bkmj9uZx+*=񬚳6, [FĿ~^ oqw˸(-mʸ(ujx 0^<x5Y oq/o_3eTx|>Y o% W.|J^ o{%W-|jxtޞ,nSJlOӭi\(]PQo"nna}F~֨e5O<85ٴBQ\\XGa?`&5`xxD6TOU@ln $uɔL85{R ;gq>h]'߼6xxoG"؉Yj)HdxEӚ%)$ItYG8MԿ`~O) '9O{:T^/PkOoӖI{m˄| ^@iOmR*~29ِWs2-7 ZI>!xNRdYb[#]&7.czN`8r- b%U f'nT~<&ϫ_2WxxhCvȢSj3|'-FYγtr9Y}V#^q+~Bl㿓->y1g=<5go-&ql(H%7Fr#e\p8%;R.qz N%RPI37}Ny(V#<\IARG&@5kxDg:Wq=񕋭dBn1xJє".ldmJ kCX-yP?%oY6$0RUKr%#[EyC* -Բy.y9?k6ݡ#M/p;^@mg^rX^cp)7ZS4R=/:.CB"{/@x1K[xA^5Iw,|meN02f,QgsL+g-1,LL؝׆eM+[^C$bGz[WJ-vxIK/ V2^P_ oC܂̱5YPWdE/=^K܈E]}ʬŧ/"xY1̺㓲*k4T޷KWgxm{x^ m³/,V[ކLfІE:S|ZQ?",-/]ṇ@]2 /uۊZI$SxOm_[Q'~D x"o ; /?pe;; l^gmCx)P^18x&fV99|FZG'rã}3m3u_ Yc.tj@@{x;d_x]<$uKz"E"79OrҷO`oee8#0ԹWM[a/q!v<#&y=<:/|~mЄDx<QPTR#.Yp^^Y~ %p-𠪝3^\ m7iy K}B#x[w^L)VoGYx( 0&بo-/ ge^N#k0+ μcË!O ےt/S-PᎺR8kۨ]= %cx08F7W oSYs4@ ǁw0ëGbTѰqǼ9xһK`ihrg- 0ཌྷ(Nš t$'uK=߶leaŠܫ<yÃMwsppDxe@  .0 +7ڡܠC[ҥޖxNATZ(|}`(P| ,M#x saC<*^! / oIT~.WOCGy)lٵH(8)VJ"x>zF^KՑ:-X3v={B7}Nr _}h5Mᡯ mK%?[ u~mrjGqR6 :>ե[8Or* z7_˲=ֹd87_-\Mka:=n3_bz<,1^ oZ&8PX ݚWA#lnC9a,hٶ@+dOx2OAĶD64]u8t)lśI O m*}؂H O 'Х&w/'Ѕi Om2x۝'Rx)<.moa%ŭo^«\AO&q.C70t; 14x.˹i02bj6v%Aï-7wfm_o[TbRn/!¤AżM]P- o6\W04<& 軺-޶g;yxWgu^YBQ7ưAg L8x˅7w=cK^*`>j J/Kq @1)JxH' "ͅRfUX3azxhwW1< !CfXN≗ @x{ cތ*%VwÉQ1hclۨ?];wCEׁbE6bTxI  ^ Ɲ o[:6x.Rx)<@ O%Po}E6[ vxI ^ #]osHBڸcT -t%xU}l/)<@ Oa:űq2x'(^ t%xw@ OkuO[#)<. 
#)<@WwWvE?Iym?5Ty=l~ڎBy)<H Rx)<@ Om(4Nos*o]>w7ibin{Vɭo)t`‹o}O!H ORx}S'?VxB1ZMD O 'H Rx)<@ O 'H Rx)<@ O 'H Rx)<@ O 'H Rx)<@ O 'H Rx)<@ O 'H Rx)<@ O 'H Rx)<@ O 'H Rx)<@ O 'H Rx)<@ O 'wW OVn}KӮ[策oU{skiGx塏ӎrϭoZF}Iv?x‰ rW/g ^c٭o: ^gj7xg/ev0MT=ck/x@i^ʠQk{)^qWAo/x/d ^;6󗱼.qG-_v'xx^v'x8i^ƌv:CcxԑR3yx'c˘1wª~k}u틘nw:?/ 2)^t t^m{N%L+O_E^mx 0ovxrz=s^tynWoyr{{rJ^<ѭ:v~^y9&v|CoxK  IHN-m =9v ^}xCO1[87zrxC =9IN)9eSqt[ex#7Wa7v.=rxCO ASV4[bxf[bx쑣[bx³G݊k]e|{ZᝋTJ;; kn%K5ۃ[;;,w~N8,wgwp+Y )wp+Y <[Rx;Rx秄/1sܗ,|%#V#3)GfcτGl< >37{lrI%?L1cc^zqwǏ{4E-ıBx9VyWzq>DwǮPx*X O ;vt ]O!8ѐ]+zGó =zEN%r:o4[^QFT2x?{ь>cxtAvqmsz|񱤆x<{]c^g oŲҀ%tk x%i#ۇ*ϔBEǗo0HmD^@L?gJľwahq4pN8]i}%}^=]S2ڝi]:x#Ë]NF#غ&o4)_Rpi|sMa&xLk!KmyuR0F8xp;P ILPm[ rWnCxw`g*+^Y 2/Ky!kzxf?#K65Zx 횭q_>ܱE*S9xak&Kzcʽ~3m{x)WbWCLM L)`x1C^5m/~ӭ 6ޠRLK?fᵶ $AxX4 \ dvf'x5th goR2w9Oޚd <`2 q?G/UK0KfE fbB"U7ۑklm ~$1]&^$/ş {=go;f:x5Gwdu=r {xk|:F6gG}:UrS^!Cyw!xA * Ly:p,"x}ójB-o{(24Қ7UK^,«^;gG6t^%A?^][xx`-Ε4IRFD8Orl k+Lhmp#9gkx|,t*b0".aNJ+`AлA 3c= {`xJx<C͗x)AE߀ 1ncMxk^:<N7_̤${H/[{J֛ rȱ?mͭyl5ͯE0ja~'*46m?X7+WÑ!&X0WCJ=xem\wJE+yÃn2`fw]_/oZ4,`Ãk!33 ͇Hrx?9gxW„YW;xgq@UhXq2x5}uDx1xW* >5A૥FHKзg;f/5Il̆,£rl cTgLE(`~ⰒWx("K WmDcA~g>P<"xp6ARr 4\+wKg03plKTJ, ͱ6r4>(]uO½1 <}f,s I,Fh-esp Wm ] QtK~A=cxg|#`*Ņ*h*}ͦ¾ Khr{DڹXp^`ޛEx <$FiJ>AGu]mIXufakyZի~5 ~ w<<4t uKt&()RB~_;Z{ ~'mh[Y7X_/[Y3g< <QD%-] Ҡ&L$Cgh\m 6WuPpyx`$9苂a7ËiX<\aiVh1m/q/ g([^WB /‹zxfʖKà7],S;Js `|\Sx '\ oKyeL#OrZ O*p-K &Kzxz3)h-<^ s2-Op;tdmj >%xQ޼`N˦ 3A0/\W7G) |caGLje46mٓp^t+x37 bm+f އQ˜Vd#(mv /8e:T-(];x?|O -o^8#y'-|YnSx« ^i 9)]ˣ\9CCU D.mյA>^Y,Rʚq ^+X&<+õo1`؈q2*^M(s`۶l6 xqcx>'r:N!n"<õ PA4F %k!ާպ]ۊ. 
iNN4{Q=ãF@@zbNcƈE@BLUÓz7bx!1w66^{;(>n`7!T}66'>6x=ø-qC|^=:<ױlP >$ V=VzgVvg aC{5b,2 qs \Ή {lbVv% H(%᲍uζzji 1k)V\aj1ki uVg긒(K_޵no'?o ^:<.7XiK›h#cJVx] omax3pUVD፴1+_A+<.^^u)3h66tps a]b]o+Y4 @Շ:wb oK,UqWm:ak ` |VSK[pg{C$F|cx}Bx~|Fjyx)ey"<+pRxɘ>xXpt]izr^#p<וM ۦr fЅ0 _huw]ۮ:iz {mT2ǚzG~hOdR^[&1NP\1b$Ǐv|xVLiR^PKvEz cڙwK<:8SN%džW-q=u$o^ zֶUrn;|΍Ҍף"7gЫGeR>'xb 4F: kmOZ+myq9O=N- $3+dl?5K~lhm;̝ 7P/*b^/{wdx=2֩VfZ^D^TuxJേWa</Ex1e_U&̇}#gZP9 Z/񕳩B0xF.Z跽^3nyȟוD`'WQf0ߚŧ~ģ+f, :xv08,}:Pb_ P;쥅7+_eM81v[su?~Db֘_D/wV}NgtvKώˊ+VZ\Y G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A/+o~MdGÉM?qU7{oܫ*|ۂ F?bxJ'?`x㚄.?~?DrP&7q,e[jrzH~Zx}ތQ(zO;fbxOe> o3bx=/'_yb:y@"Af +kk4 %lx01ç#*̷Q\a)`'{P@.TLXd;!/Dž%zLoy5F.+Dځ*xЖL@kj^&ԕ&[HxPexe+hx~EW.&H.x+xԅ_",%_bRûix!V+TxN fk=xSxm̓YNn5<9jxEL4q>W$<ErƎ4rtvAeppΕFPɀo$~^: ^Ic ^^Re-0<ꚗH921F"BkxԅCc4L" /' =ûQt-!kԅ$$L\\M\ OC9/@Su}Rԓ3Ndh“Ӳ,}@"O^!axUҠa’ ngJf pQm9˂-Va<ϗr3/_5y0>/  gفVU[" /fxx)  <#o `C RdX y@KJ+]xAi*B87|'QHx? G[D.,u$ॾ#O ϯr#4Vt';P<k.s.׿'D xP +c`DU՛a0) //ЃcC",?ރ P; kd"<0S V3d?uETݬa-a5|WmUe D1p+1!6yV #azq< a.ڿ\yuoZ7 Ԏ xqHM^qk ں tq=x`f 5s{\R@zH_rUx3 \ ,[bOIWA '6U|%^@#bx1<yK3ʧy7gx=o׼zޚǣ-Al$?k޷K<9/P5,qAVʔ,^K" UFx A@v7?xzXtEVR}x[ph"J"Ô*/# < Oa:7& D*e 9TU֜|ѻk 5/x (x9DN@ QeXit@,J+<5jG2OI%}rxN{@%A"[E%uprxO0 9$-kx&%L@5[OU獒x0`A<_%tb_N[?^JĄL“^6B]jSwsÜyҿ XN@J6XuOnʂ8lCd~F⧆D^Y {V7=ᡝ)xjP= qAMc5W^ =O=ܬ]s*!E<‹!A!̼-^*xCy/xxl 2}\$Gnቦ}SE~jxN/B2' pw}f$-*C!^305k,,ل BP<1C\Yo`BdI yqx4,<.Cd#e=oD?9$š/΂mϞ~`x~E?Wo*W0O[!Qn(~V WXybxe^,o'2}ko'2}ko'2}r/&x`x^OWW_ޮFuW oec5b׃ q&ܽzy7L?l^m =2r}UV"2-h; ^vS7tQ_/H]"Z:oFwL? w< ,^l!A{_1{y gTq?,@7 oBxB~hxky~k7$xG_cOx&5'p^2B4DR )xz5OA-rUlq]hxE5 EU uˇ_sTi=~Y ׼G'6FxNU̫rJ^'b_>[Ab^FcVT ~rxp=hŎHjx“5*sÃJ \XY5<*qQ dx*%in \#AxQx*#4Md/P/G(xkxu1R8^ޗ#QWs^+xGx1𬞞u(xas(magx-WNI)x)y8ni5):$ juVC%vh'C;&@EKpx ~^хɷ!ĕ3?0!fЩT\ 2@^0%U<\(1 4z? 
4w:{%<0ʢq+c^I0"Y.CbUZxQ0kbe*,Vh A0lVTw0ԢDm4Fc5rG/qq(>6#@ fjfk0B3"s}-͛ UP:wѓс|ੰZS"4< 9zIOM9US-b5 A(i߉u8^u8< ^ռO3qx98xxW ޒnɷK[xy8{0>O/:-@x9yiVw[/F{ِ\y0yL[xK.Q`5L>9}ē[XTva`vHg~ p+ <FÃY蹒 pJ2iaoJc F7t=|[x` ^L}02 fx=waҞF,̪m/\@ex c\mt8^-G}wh B2ӡƒ]a3l)T  M,z:V u.fb%;4:K\-O <;;ܼ}n`}-}%oyj#fcؗjS(z/s4u ۍ Bn 78֞4I:xYE`m1UL1_9SK{*̯0Qy^ËW7[Xx))${hUA[[Ԁ:=x"axr (xv 4Or[)xiqINk@x/%Bx9«w/LqR!!19N 8W(l<ɅRμF2sKBƍKhnW$h3702]x_¶tCDH},< )[˩G+W$VqZSE‹jxʓ:Lx. PA?>hXཔcDkxa&xN /Cx50:Tr$|$$o@6f:ea$?%e /8Rnkj.JӟWWWŕ Β(t e['NOS ʮITM `P8-)0< NmjӳR.[x9q !?x Up(ŝqp ,hn O=tƒ3 /2X3Cxz-Öϝ^)v>{{J~ s17'arJIG `T u) 3XLYЛT*t@S]=&Ṙ>+kvv^Vw$<.W~BxZxʫsjx^Fe^:M}?y}-ᥘ[sԎBш"w6['^IOON{ @x*;{pOQ$à ax;ּ*>>z]8ag#<%L Z />l1=rH~,bfFbn:-KN@t;L軪}C_;?m+S)yy* R[t'|mH+dzpZ/9863o\e5lYN2/~ݝm>;" t1xe`}] #^#GPlCb/-bxJdxV~^-wl)9>#Rxl-bxbS G#bxǖJdxR21ٙ؅=Qu7txԳqxKwEzCN=k~ "ofvczCM>m1ٙf<:u-ò ,`<4 oYe_2W.f[7U9xJv'\gfh2W.flk1r^㞧e{V[ouv"59-?7т,J'[|6^e~=}5g㝯[\kևi|c1<[1l77[*5Ըi".ˣ : o%^]"u6eEv7w9j),@5ձ s4dڣ9-yMoxx״4bf4KrqM[|v |C/{J/_V_ x%;3bP﯋78~uV ww~u](xA U\e K%)}gUf0gxSr78Àq:xv5[4ɧ΅w?`,Im+ޤ\4R, }fx)W ou(<;Nr[ϲ ϛ[Ϧp3l޸f-wFrex*BcR43 <44bx͖K1L2)ݫt#`xKuO2[[p̀Ȁa)X .z3W2bx+R[V]56kb8-<*{Rx붾Uef o:`0ź<*{8pq.FSJ[kLJ_eoóbn{?2ź_MTA:9 W䓔8sJj9?ztx?H¨}qHzx"&{@w? Wx[x^8^Gs+"*\ +8 ^Q]"A%~@+Kdz\XWwow+5w;M{,ˏ D$ywVx[e'ft{o(x6WgG7ͣq[|UKmm`< }^m>l|^]9f^;UXxEj>zԼrWcEޏ{Țci-ߍ8 Z_/w/^l{-ᜱز3-8d{^ wO]tm7_(G(cb[(+m`xq^ M&.)oP P(+mvzϊ3+AWF2A[{t?ã>r2ы6/R7!Ee|WSK.,pg01=+熘瀗;\bx1<Aϱ( w(#ɉ | ol<vr 0*ǟ#:Cgxx]TW՟Mf/̶|$Hk/_e?48ÃܫmyWG0%z nzxD;IiM[%?ex+G1[-T|Hڇbd=ހDӘgxR}4_ %{oޝE28zû@Kjl/4h;/zb̵u1ݢF/v.Gmi[l&L'ǻ]=p_x@.,r!i21×6EJY&C4:Uh9%L*Ɓ%\/~m~Uavru.fnhD0>/"^2 OEu^^)Hd)T{aU:j$6K0:E5%Rݹ %M5h-]p0:EB/@Jb[ sm#22OE0,t/`{SpŁ${.&l3{Lǣhxt-z. 
D;2?x94e޻>s0oFG.@=%ޙkj^*֥uX%4Ya;UO_;-kYgsQw7Y+ /{N5+Txc;ix/(51&9挕|,)PPJA)flztIx(eQ1vvz;ne~R)xpWH+fֽϚaTՋV_[`=az^6isचW,wfƤS ;/D+ۭy%v^tNwʕvx1bV9@#MvL*'8Cjd[Е߱ bÛtJere]XӟMzKdU0`%7/*N[vALpxŰY['y;j;^>l'+.Zgq0҉opx#VIb wW܌MxMKo.:e/)̊NoxMFý*cb]mr d;^9n?(xm=4x\r;^>e:oz›PCwޱ1tͷ7T ~,+<7eޠ9 oy&oʾc`57=$XsDk0shz(bfCóQC/?GF|a;};͍ E 7zyE0 xӖʜ8p=̘*U{8\fjl )@v$tH #Ͳ1U76{ב[4lcّfyKix1CHxs*+M*cn1ޑvΦuiXg}$G'<[Zo~|y&4-@v \e4Ix6s &a]"? d1lѯnd>ޒɗY]Žޛh;>yہ,Qy2Q#V|G/K_q*-;}F- ^ )xO d[K77nu[$ 7<5beo``hUpT7&wTpُͿ6>7v&o?Û qV/2kJQi M r'›@xZdaDy#D ̴N@1l)oYmR9$廩mF=JOƒno0(`S82G£Rf>jE&i۽j"7MJv[VnMRm8xz3وV %ZcsV)$1xUBQ) nN[MG! z[~yfԙ0h+ʊ;SN-Z1"k&T[a5oi@<#]3:48 0-gk%3oiԺI5~Vf3doōop-C?^u[LnW;ony&n7~l ͐pp]l0?[ovwZovPմi4 -sa3o7svv Cy7'->doAZwhD; o |Rǂ +g-ul7}ǂ,#]xwiH OJ>/z3 3`o~gאM]C+-wm0$=U?J6JlUᡥURP~`T%5i%0 /zP7\pxuOTn}xڔ;tC W5<Q-a^ع7^4z<Wy(Fr&idxf^zIQ+%<5/nC)&.7XMzӭ X!xi!:kxq)xef[_oJ0u}+Ix*xA< /b^Y 2&N THxMi 7~7v4"$`CFF'6mY720pK?ܶ1@ CoGxCcwXTn^a o&Y{_^Lzӵ*AW[xrz~ȑNVgxFͶ GD^UiLzjr^^Pre5XI]o/03~HxeUKN/^5<\È*6hmW7?НDS%G@ RyCM4<'1Ə^Y-q .YiK?Nwv4~0`@vi2`10ݾu:WE¯=xIdߜix0À/a$uq8d׀RQ*xa(x1@G1&?ESnQCooxXiW@P[b#xrx◢F eB^(WKӈ}\6Go?xjkEg #C Ϥ2Ꙇ'b5x2i魌@L# VE*Үikeፙkn껪x1l a%>Sud𖿴,~5obO?,3NWHxUg(`]$xf;ނƗqq0zqpt xsg>30VMʷ Sa~8#^U .R6]0 pCKC/ RHHx~JaiIOqn+fZ<^sԽ]'/ j}!T?+ix/aU KL1rY>OMfY-ݤWV{O޽[R%DtM,z^z /$kA6panSO&NH + /pP$i.|t|S`%caGW^kO\>/[x1tS]b違xJЁ;Tj)6tK7) |Tlf:UXo eAv Ruȕ$r u涅>M/#huO}Vb WjxMV͡ t.ɢSP\RBʩr0B^ 6ڥ7_*Һ &#J;,#!Rsytnb MMF Hd׃W0NA!$(9[/մ4FP4m5I$kx[]R6&Oi2 Mi_ o2zte+Q;6n! 
Gr /OZ?K_<>K'x_Bb0J ar^nwlONi@Q[KR&&WCӮ?ozK"PTқ13'A Oɉ8ޥ#:wt"w[xju )U*.TÃn7ii%՛ם&`mnbU I]cUML@+ś]xoxyF}%|otfׁ>)X) ^bG8hl"Շ-^whEл[8R 6W*x<<>[m?=X ߖJb^Ёȑck\lPMpe 2od̩ R/ٚN?~5"&[삕2<^w1 y1nFN‘2y1Aىaah <mMgPޯFR]x^+xVnQBsxxS{$Kz!2͍.sv;h1gg8}{t?D™[o4&/t?xFo^ M5'hk=nIy͒y~^3x+cgyʔ#&@oE40ʁ<55l&sq>L)CoK U,>N Lwg^2Mr w\K̎YH7{ ^@dfLelg^û6 ,ŚD5e &Ϟ #Ղi-K:)e9A{30?ip w hGx=7`,+ ꌂF %q{YR*GTc|fgi5ՙ }r\^qSEMgŒ RKfyGمdgDOSv #Y9#P% +?gaab#k $޼Wv-K'z]T]x 7;.^$7RYެxz\c3+y[ 3лd?r9b+x3FꓓqiLDxn[*KdW+"Ɓyv&­gάy:sS*­|&D+s[lW[?tS5C~3SzzC]vz zh g›\Xt&ē+ TxSZ&CWCuf7i^g7;JN7UWn tX ;^1N0 ^8+gʃ'M77Pz*J &'mAFGR^^)Nmg!Ó`8Fۂ0N1a4ˣ`#Y6m ev:,s R|6Xu-uM%3~W^LC)*`*1 V t"HaQ>J>}-k($bx,'^Pt%pAb5l{xQ.M1|` ޚ%^2Obvi>dFQlغW SJ)ES^:lmV>$,5<5^9\0a˩5\<3Hque mGBot367XJޙW;W1;q4X; sxy4#;W 2_ܝ_A‚@8 78KHF#<ݟzK+<(ަV7I4u驆,qs`4j؄^mc]];ov4ͬRL:tw^*롏v sZSqpd/@4pZo?OݯJ[KΛ f>y*64<Ûr*םÛ*wP+'sWrx*<:6ǍpЭp6ބe Lq xCEM/c>*0}5 mzV)ބٖ;S/="\CT"hu!׌}ECᩑsb71fd(p9 '~1%TAd,ei~6KKʥ]n /x 4~]xyjbvx*z]Y 8e7/@?u nuN6}cGx: dE e[OVǏ.d5bipxJIϛ}TC lMqXËIZxYx5k 5j'':\ /׃w/Q2[c*U#ۼ. 
gYT*b1Vϕf\s6T84<(j^t榊 mkxQutH$<7\R:g-q1K9OU ťy gA>Y| ] In^5tx:wƒ`ulW{] g^0욗˺ i\Rۦ!ra /Y /:^6 )WE3 ^nxKJs ;Mxq7 pӴr {#O|@x[y/'$Q>+FTY7~_Ûuӻa4ؔWjx#.Sx&|>37|X:ܢhqv=r_5<8JC< oH-\N~*܂y$WYx.VA<@B^7 y%#µь16TTl]w^U8 `6KWvhv_5^@ 1޲M^C^?YdUc =s[uw=~ ?0o젿E 5 hzN0!0 J7\$^Gg]*E`x.o~3<].AMkkL8ʵy .)m5bx1<ASʆ==VL`Xzx{,zlx6N J}QR;  G*qcxGk߹=$њ7E'ëm:M|[-~ߪ$^s']}g|(g_ڪL?w^ëORGU|@:\fw7}[*>ziimmT/(hܶ`xUBz{%)B|"{S O13<^-<<_*Qn+,&2S'FÇ{-}c*woT+ u 0<,irKNfN?{z\xDž?x+ErQn=,<83?,x2.p Oe2S|ߛ#C?Q)Є }^.BnO=jぱ;ܣÓu6^hC$x ]f;z:XZ󔕲g'p& =Cx_9t30sn07*ףR o_CAG[=H5}?1Rپc+k=&z^sck<z /k=(پDŽ׸] Wվck,} :A5^~{HxabWC!uv`N] Gwꮆ#:Įޏ/w{?"V#+^+><Oz@xy/A֞Px?^g7b_Vg4۰מ;YL"5p6=w< ]g6~C3gVov,v4To{@x7vHZNnv} MM1h ;JvFm+3PY&~/n=_nx|4V1?0Pd7[et7[{}X / G^hBk t"۫ /v=thdm iDsJ{i>k E2n:sy'}K/ϟ~(b؃BUk[ϡa݂⳱gv+}mxulmiDv,0W'6ΈCe&>f6qF /Hql ˏșI!Ko=QЛ\]?l!?u#>ۖ8+nz\|6p .RcQn ƉD*x<U0 [xz#`E-O]UK<:/ k+D<0 "U$^҅}Ex/Ek-qG xeI?\d ,spVp28xk _D[B#<۪㧿-ؑ"hS^.M'7qUC_zYe) ̐σ.с _rS8كW\pd]HWBVi^B 9 ./XUV O) =x3EeYsG%)EB 5$l[!bj'ZQ D!W@> oɫvJ *u''dU5Ye%kx9E5n+ͭhz9尚;q5'I#w`A W\-u;-|yW{N|y㧴NHįR7:s=*L]ׄ?nenu}̷陳Ph.w*Whx^xGcxz#ނ 'l|];Cƒ-,M ]WZɺgwPz3\V_xyNLd=ؤFdO(@ ~Uѣ{( -.jE#~G;;7^]Sڅ'mE8xx̚"+zD^HVmەbA,^%4^-U,/<zMARxYzwB̌s/Gxl, }Wy9l YߢTHs x/ry/f/ţTB`冿Ҍ:*Ȫ&!XV yqZz) 3mg]Q Ol .R$\7nZ 3O+Ir0~L_UO1 6OܡW38Z' gV;*s>|xebG ^BhxY /B?5ȫRC _7R¿5< m%,_ w GD JK}x/%\B7 ] ^-h _4x ƋC|cZIl*?w[:7xqsMxFxg>ik- S-,5*ƶnwU5O\n $L/CO[ ]@Ëtfp$C/iE-b+''ExkÙSH '>*d5  1 <_q̢`! YKCS+S= ^ԹFUo3OgNsxW^ΥCKp' Q'[xi{Ixh~v o]xY^!IÓ]Ù`xA oU | OV9IGhL{ 6aB;ӳZ `;лCW"hylʛҁ{8\E4WB hxNЍ+=<(%o.^ YS$Wu ӁC~3}x-~,D]xkm+Ug<$Feo\Rv&jj𰒨G冏6l- ,^ pG;‹z3炟AKWSv ׄ%>9EXu 5`ЫOV4<\y܀|"pF(x^zQ% ,qB /^</)}[USJ^gsɚL“mvIʝhx>jJV kx <7ϿcŇX 3"t w= ^I WK~9< @v5‹W&nWyO@&\"}^d$5</T+SKdZUEUܜDjvD^'b\TQ^.w-~dx!5</w>o4,Jsxe|/Iԃ\H f^cǼ{x><^zMx#WՃ<+tBÃ4 #n۔;7̝̕آ>>dOnEp*}ExIWfo ZXZ->F]pz6|[T嶌!,Vk=2Wl٦ S6M2V7`%l䕹l. 
o,yoIͫ @]tLodю ޝQdю z*xwHKgeݏ4Kj+\hw7]sL\zugwk̼'wBK /ûwuT5f*rOW5RxYWϪ(m gk7xDKemUvP-gROgR.nh!xx=joq+.-Bz:xj=FķV^m׵UU vF[/7[2tNf Jfj]W6Ib^-菫[ڶ:~dQٻWLb e iGG-l 寿OѶЃo@yAd"}VkbϚokëRk69^3R9UglbSՆ1Bt o#3<OC2N)c̼u <G^w<[n̼t rT\R9 ^&o/&6\/8: ^q)yoѷYLWgz;<1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx1<A G#bx B<IENDB`DdSm g/=0  # Ab#YbPZUG/nYbPZUG/PNG  IHDR gAMA|Q pHYs.#.#x?vIDATx]ؚvvi:i@Ai2hr\tq1rSt4EٛWQ) E =R$RP_)Ѓ$Bk:r]^YϳmgkNW~mj˶#^5a\кugA+¹߾{:F޼Y&k.B)k /)Ɠq ^+ ^RAU8fY]=xTj CP4%/F<ƃ=TSx׷x~-3l+,@:p5SIKJ /R5Tx+"x+K“4祡iǑĒv׍w[_€|@|xNJ7E<^yM<kbMk}]ۺ%M-^!%/iܿPx^U2^^Ke_@=kަ /"W)\psGUxeCxv`-^c<0|Ux搔 ̌'8^o3m.Ixo^񮽶Ey;xhӃ_5^=c@xiXᕌgsa7H*{F/Fr3SBx2x=8!tt0Fy1L}?xW^44^K5^'Bvpzxx)"{uSoށsOMcƫ^Ux9f?UA  SD ~g.|)&_0=߿9?Ha?pBང?hBx\J/e=6Y ^5sf%x 6 ^VY ^wJGb!R+kׯKio۹DsfxA. ^#:NH_R0 MwKLYnZ,Xnx!]uY-c_Ж?[!nUWboswZJ0:AVcxg˖X^ I*:ČGXEtnW!{ k:-sѻE2[^Gd#k׳U-*t`^mcRKN[Y^Odz+0|;"}x  ]^:>ϟ{VW1,`4Y^  ]>^_Hca2E}?)mxx[>^ڃk>A\zxqzx .E5T#_2@Pv_e@˽nB7HzK>Egxx0!Gx0!GH},0! N#dxWKxGxGxQaEk^<љHn. G_QfK mtWuyxqٯ1_ܫn:Mk|o>Nb{w^o#]ޫW '&mg׆Ͼlݳ[ב/.oŵx\:VϮ yp[֮ɿ]B+_jbљ^HM/vaxGdpT+OT͈nUXom{9SSxgJB< _R9]+ޢuS%.t?!g㕄ጎ''H~Qϒ;pK,lIolB(8ߨ~T2u`k c'|xۃߩw>^#6.y):GjAO a'-^5-^Rb(y/SPa"ޙ%Ov^u{ޣ3? 
(w :S{Kj|_6ZCo< KgAR+xՈhácez@#M<#+VU6^xxն/W]#4^5^,c?4xϞX96̒*]Ӈ<k$Rxf E ﬦvo7P\,)0J%Z%on 3yxzbK9I}l a^J}UIDXeUSE"Mp_+_2RĹ'ϧmjͅfGJT~ 3^𶽲zZ7R 䙵X %msc5ցwni+/л\f-Gr-5B.2|xZ9xJb믣knH uQ^+ě*.Y|k}8m8/ݴg8/}g8t yuްqC Ӣ6ϩ4i%O}x7XT-AaN_ttݢ6vNwPm|89r(ye"*p2lm2WZFiMT S"^FB%"9,ܕZ=REN>͌NG7Q3xB"5xԷfH?g2i`( KOKĿ^xKa*F-iM'Px8s 'F E;ʩw$c( )n w$(+ BlR&a%}v&vЛs}{BOn/Bi(ۃ'FiMTivV4xY6F"|=}TCE[yTc4i%ai=67U,zv heoӥxNI{eS(]<xA?i(}Ss:Zuc\= #/Q#~%QE|iT`HO_ćCR0eE|[: \דOJŻ)B͚xiUJfH ƈ]U6MV^[Xa}^th6BCoƃ>^v*y+xAzH>ks ᩦq'],+msUFh%O[o /DMU GrGUɃ_M5O;wڪZS92VߺMDaꩤIVx_3g͒ZgB&#uJO_;vWImYx{Vtx}x=3~X j:/4> +)sҴcdn3Rﴅ,oSz  S8%<@-s!ڶ=l<^*lS!@3(/.m sI-<^Pm=O5=D˾mӶsکB{V0t S׹Q7[S}"Y^"L3pk :{$#԰$U`muáL辭6 Ln>y0Qw=?o0{VbfGٿ}jvԋ#6~6zuom O#{H]6zsu7D8kEgG}G4,ύw3{sĠ;bf+(V6[ɗwDf+39}ĢZx؈S $Z|ea?e{HUp({?YrQx+c.QD}~!3|N&_;%sl A6êVxk]spM." %KrzG\Ļ5xrSU. /R=]K%xjURR ͗K ^~/6v) ~oi6QǤ=gmDtEiܩl]7^Yr;p/(>wԡ̇Wl?NѺ/̾(4^ugd纈Xtv6a>vh n[A`=k7;_xTxArlkNJxyg7#^c9I+Go*]a>Ux[vzVaÛ6cfRᵎsvJYtxAp]-Zv69SMY"kۊI_BqlԶѽm xޅsvgG x~t}{[1^|+vͼKͼpvg˕fLxՃkmnLxU"lkl̈́Wo梵@( {rh7^oeɇJͼ:ю=jl̈́1i`tc7&l̈́U>v2΃ud;0g*[ʒ\3ށ TZ*au'v=AV[*u,=uv:R&c } VfC-I nho5b9TeAxGC =[; ^gҀPCrKeIxẐ=` [=u&/o`rKeW-S6R3mNx&Hw,[˗퍝n /fޒ,Sm7fKxl7g[#zy{h̛|qq?hYk=u&ZZ~OC}V,GR{Ԫn%,wdtgv̂vXZ~OCyG9^Oj g7:_9feu67Άfޒz|@f,x=':z.g7^˶&47'ǽ+r3=J^V3vN(ewN9n{^г~Q^,Ḱ7^G1sߖvTn}BI[gj|fPސ}V>eu!ǾE^,~jSۋXp7 苽n2"R_Zu΅5h3c@$#hp`gJku,z/̍gbYv0zxXlB1MG8i+g|c{T4fۙ[a}iٟT'm4F8i+gpX ^pRްQyݰQZ*~Zy^a[4.Je4Zqyj%2wxV(yj%̻\֖(mobrx(gb058ycb#r y8Ѝ[ FwxMk#/.oc5.>ҍs[^4^=q˔u.B/n̻dXͼKƫ+ٱyWmhxx-|j̆sQњygjњyg*c ^1Ϩt[^6gb#LCoK3mȗWw6wxx_Nnw1hU}@JOPSx2 hm/*HCzok./ 3*\;D_ K% ״l__2%fƻgP x/I~&<=qK UaË= oB> EbP%^5*5^x9K|K"W"xr9,W|EzeetR}XlnxJ*K/UMTfpHtgݨ/s uqn wo[# x),׆g]'2)\V<7\}Ad+CH* h2"4fT2 ;MS,~* {Onjb %}I VeU"ޭjxLT$f<xpT@ ܥWBmf^~<<:)ozpӿH\/^{>H~&wb:ٲ2#YF߉8xGe_~!exen#&^TF^['u\G~_݂Notѻx#?,lr&#=D7>矲&E5+yQe7Ub; 7^?y 2x宅<]B˝Sy-5W\Exb%^r7Kب:+mKxjxxow# 6oxx*%jŧI6x>O6NmqLI< x7N<n y /.a$) oݤBSǼ''w[WZxC}܀%7AJի<_1"DM4t^D*/|Z\?H%ƛ74Z/>w $fSOl%Q;X~K|^Sx8: 2^L>Gjȍ QhtĬ!xAχsc4 F>^i^x 2 _ W5 a#`3xsǷ<+/:/g\*j“M/D>F` >5 2P1$jOݍO+j}5"4h 
C.<*汪>4sJ)aUgjLj祲Sdq41x 7oUZjzb{ZU;/ ힽfgT +!|)yBh3GJ@- / U8IoTOWF4`WZ6<=x<}s,[|whܢ ^1oӰpuxQ^н[gT7s'\ē;OU+??0V^D0Vzob#PJ@g /um 9ɰqc^RFmi煸i ǣ_w̃RU׻ Cxe^aDOu0OKX%m@|xUGy/*½bjUx^*=$UEJ30KjƒPbGL]<82[<W1CRI=JV+c<ݳ-odh0'nxxx`w/oKFx nesAg6PM&ρB<5 >pxԶ<)<"$v74 =za\7^*/˅Pea,|`( `KYOsR.B8xg4xU>?'‡޲spF2Lp%0K k@T0ܒz8%+(y5yP{ܘ.4?ށjOP9Wܯ)4si2g.PGJ"O]7^.$^%On^+e׌,0x$&r ''/0x 'oG8=GY>zki6r-\5 W] Kz7νI׎W]al EOq Ie0㿉vm޽@X6<饏xAOjLO+irjn]]ܥsOej%'wD-/tqx38ԦUuZ9jrC OYb#Kp<}6.yo[ՈPw[Y:ʑx8Ux8]*{_zUEs4Fxx̬ʤwxgT ZxWA$) <,v h(p^'kT`ЍdL4* xۋkB5+/qr|t/;`;p ~O9̠NOmMhamf]^t;R=Ԯ[ɍGecSaVW^(n[$<0xF&*`*Ь[E#J[<ƾ-%Ǘnodw[Wxp_˺,\ƫӁ'ރSۖ,T7c>^Y?m xzcȾ5HB;+o8x>'ͅvO_$!1gkqb\S ̾wݯ_sscuBx3xHF.e`` (,D_s*-oKbۜwOU/lsnK {x'fWˇw{%̉L|.y0!x0!G2%;-*Gx0!Gx0!GxH2!Gx0!Gx0!Gwtfk;S≰gso <; |hn2N>=?LwB>o΍5#^y&rؘxߓQfÔLR𗜔,%Vƿ?^;GL{|GΫGH̽;Yn&݂Y^0aͽͬn:c-x)bY*\ ^ܲەF-ܫJ s+߮/VxI2*pْz+C]:m%/蠷t-[AoA2Fq[^X,U/{ 9.X^,lrVok+۽1րWWUgvr? UY^֮_Tk)jb5ŬbVdhÝרZWuL¹רgrs ݑl)C+ ;:{{iW[^G/e/{e)mKi,ܯߙ|I:XJ[exLWպ ŮBVc qk]3dx]~m,d,Y- ^ȕ-d8txANj b,nXHcxMet1^Z %'.e, ^1񺫆e /E.Ηqcx=eX:^ΗX:^E֊-/>IVEtnwl[Dž֪i45{fxYz12-z "tU;-a-bXex?Y°j0pa-ad`xu* 㝟KZ8^?"^?Zs70x2 e /a@ox Z8^% -o%e `avx^]%3 ތ)z֯zd K{:%;fWtbn?}2PU?po( R1뭪aOwGVv.LY6^֋ D᥽xn3@^܏{7'z~. K;i-HNFW-/wS_=?dx)4se~z2Sq8p+_0xҵf˝WIcZWLKu*F"*B Jܻ+RɟϾY:^GR}dYDs^]c,oZ~%K/){18.,; sO2{ Y-U:+ _ [co@KL:7_s?' 
WcJN7^P~2.'>˯\ ';w/uus^+Jy$@ޕWfO݅{~vXw K= |tmDTB@q82<)v\}`KJ'.E(K,y'|JYŁ: ^%O>4bKD#uoUPzZ{}R)mI/a($a(U ~(Px,/~mXZW6yFX>{ S_nBn$pHⷩ__TU(]LOaA^U7YiH( USF [^C^,;Ϊ/TaSI3y¨g!jwwqw+-^?Xzt/UtD-OtÙz,rofZ<1xe s} Z}-2}ۇENG"=^ꗻx9S }8CK)\ae T@ ?x-ʦ{Qz^8Tx?%qϭ2 + S% vP2xz'w?FUxT8*+xycRNbv\S8PUi)yuqj&(d8[bfp:[<,yS׶x>^ x/yDI/,ԕQ$J4TxzYW^ϛOMjT%SP#_yWƱYnd>ޣ-^Z ڄ6T_ Gx*jBs'ǻIx!̀p̃֜jDYOnT]]]ݳgƻqCN#^A)Q%@䩪GPTWGJQPJziBs/*xA4% [@Py]xIɢ+jA#SGQ2SA8B5^`9; =Ԡn5^xj}iT{ ;x R"^xuO%B5xST%/-y?k]x*yr4^/ҫ|6d7^AL;ч‹*<enzyxu%x /0xaf xO}@/2ƃCU"vxE-n4c\&6kUUS%ه^q]6p*{xg .AgrOj‹YH"l$wd[}3x~up ZW;n[8/ǃ&>#^k-W5 WDn{]x _髧TS=cv[/x=^<1={^+ G ^K^Q.^w3\7 `TxoA Pu將x3%/IQ /0xXW@3 b/)%Oyxea/4Mw[x_/Aifϭ}dhrɍn=qS_8Я'ϋqwo$^=jpMS^K^QyzH G|͡>>Ux Iy8/^;f3`GR~"?c%YR8<{WxjS z+V6^TXa G8dpv$.܃/`0x+a@Unrs(4fy[8e%o/=W=~>SͶܜxOᏛ V5L>xZ`˔xKdoj} sN=-[,|ÉP ^0IΚ\}% /Nj=axěpV[j?Np~^5qYx'Ph'E^2:eu#8MxPxVw^S\n{מ]$^xxh۰m !ӕc*sxmNmw/x˹cԓv6Cwa㝃ɻ#T7^M51Ꝋr^~ٽg2I܍,FI[񪔿x系_7V?,R׿*QB8wfp^NKGZUrySTYΔ0$ۗ;pˑVvx;?}ibU7!)#YOgFaVFȏBJxxJ[8zKmg/7ѪFp>c̄/_}Z\o.ysV?^^;%bqI_8f< Q]?2i>[Y˅ uik8Go:r+n$ck*yGr1\^`yׄw0a] ^׾'v7w{I}P_R؋AV169nkuψ] ^xN}PO }ˬU>'RKǻUQu+nDx}'Nf>j[e";uE'f13*{mi`<ϋVSׇxg1CxbLfNvE[$x)HIIR'K|M+I̮RgHgsxg4w9S՟1^ׅf;yۼx۪fkL%;َ X-SkkzR֋X۹Zqkǎ 6q_ )-Vh4w)<\X/~q]Y/^?l2LZkGX\mx&׶{x+kf)ix{UxMgkͳ/B^?^sܶ+5|3uB%5umklZdJX^khxitRhnMx<##x -zn3n 挏Zyn5n挎Wv;)myne85cRjeoCW7ǘ.mooXamtbj9vk4gmhg|#ֆvƛc@oeq%Y --xt9FCwr"v?!{x6c(9v,-xr<[+~BvwBvȎwv=<҂GFo10팥-pxsĸ9Nb~1㖀gm&Ǜ uogMV(=™朥вZ!N1!{njGx{,eYgpr=2!G>C26ޞg_*#^Βƻ}a`o";KoI5d#dx{wORX Y{$ҁY50dzuoSxGgoSsAx3^<+c/&cx^d42^G:9~Ovxӗ&#c5tj{/-ϷkQ--#֣ϮOط:/& {,yzgtwYj.1!W/{ir5󢽗`ȢNbB7_ˍatݞrZx_^3W/;ڛ1έؤ~#pGϴJÿgBD/Ut'ʯh܏:k[#$cx7$|3ޙi^oklJ<V:0iybfNLב8M Ynx481GdB#diǼ6%0!GzWZ[#w[Bk[˜d=nF*Dz.y.\;^ǵQBB_ʿӳWἻ3?& GA*1+~)ӿ6 91|-YfO5x]m)?t;`wU6Wvf;a;8Ώ )8ھ1?>ߦ_|hm߳ u/9d[x獴Lnߣv~nl᝷xx]'_Pi޹DDoOwwwcoxxmk9񎹄;cowv)7r&ƫ{B"exK\a}/^nc{//jvv_T=Y~r:']xyt@2- `!x2: _[ IIMD2wٓ{$`|<;CuX#ߔxxES<;o!-;bK5^sGOg`^K'/$^ω _}m8O?OcB+wP;3ΙxIq1?^_2 m]tY ; KN‹;r 
~;Y2ݻG~xAx3>'yoZO܇ƗvLO9/YKM+$|exwR_}[zN3I{?[r!xzx{u%S{OqW<2.w^VWQMWz]E۷S>L^^ײx3}LEc7VHjE΅WRc%‹TU!6z7Lϕd5^}6/ޤFⅩ/r][t;//O"%G~:dbKxW.ō$L΋+4(JM '/ /E(QQ?2w۴̃BFq4^R}V}D _ez-6j˴GR'Lzx7CzjX8^R 2/a]5@}" ϕfԹ=k*576+4L̃}R@aQ0 yBrP'~"ExjozױW"Ao V./w1-DJ?<ȜZHVQErRБblOxW5jmiNW:p𘗇y"Op9 <؀»A@1-<\q^ҏT1ef6tx$_xsͅ3Mp<}ubI-Q+~b6VK2])_>{OT ^Gqi$C$lOwvImLE0^)IQ㽍xŗS?t̩=5o,zԶqXa۫0UXڄxQ%AVṦxq܎xp=6U{8cxpt)[ «=M(;TxۆwsQ]‡7ji0XK-9TUQ0r)-]tx/&D5^p/+cT6xEsT6aG63g6 bMO wJ/{XbJq|mգ*S9xpH2ZWӺ bm[p|DXaD="]3FwkP@_.{xp̃D)vH ~`ƒuxzFc:nk* _zTa-^z߁gH80Gx {5`(~'uSEW͈e]ۆ1:|WX9>^Ag76p)xXÁ˜Say{ih<[ToV%NB(S)flHV^vx=𳷻bB\g^۳'5FzÓކk\ȀtҜb&ξ-o_ x FEЂxR`/?WpwgI9hAܿas=8إR^ͅ'MRjo"ǾSVjzFV5Z%o;?υ2Ի '`o rND6/!Pi<̜׫E|Nj\}\ϱwTyZ-rǃ!6U3:0M";xn`O#.y4r/ק"a0o _䨊m[WOU\JN<۾7Ŀ.nu_g~Z(x"npNk7ĿN9ɣJܓxIo;m+YoCeM筒 ex K$ѡ|Wb yv/1}^썾 '0}VN/3 O:qPw{"̈7x\ʢg;C G oVxqĹ'HrV` xOW/D_N~%㘧ƒ.y7JrS|ƋChUġ N a@IXÑd>^{H .JB]jUszM߼ۃ@&YeS:XM84xq3vґ“ Py=m\<xygˇx{a{?$[;O xx^fjN-4^.JFU1O'Oõm>o/rۓ]twBF.y/ԍL;!opJWog&m CC>88bm/i, OwL/7xv3-OSCի /6;z&%>!9Wh ܴ ,qJWp„N,k)X;Gx h<\4§8ZHIʷ&^1]xBmT&}x]´_oOe}QUk6/u}JV1xZ~_0pJUSuū7}/JUz'C3av s7iu`R ISg_.^{`)쿗GUj ͒:pxm@߹Q1(ŻmxcD17F^ÇywȻ-AdxuV1?3{xMqo xS~6{x-D:'9-5{x1{o}s:g[_uxCxC[ãx ZU'EG-*2#t/o G*we#5EfdV>_sm#a?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~      
!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~                           ! " # $ % & ' ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ? @ A B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \ ] ^ _ ` a b c d e f g h i j k l m n o p q r s t u v w x y z { | } ~                            ! " # $ % & ' ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ? @ A B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \ ] ^ _ ` a b c d e f g h i j k l m n o p q r s t u v w x y z { | } ~                            ! " # $ % & ' ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ? @ A B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \ ] ^ _ ` a b c d e f g h i j k l m n o p q r s t u v w x y z { | } ~                            ! " # $ % & ' ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ? @ A B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \ ] ^ _ ` a b c d e f g h i j k l m n o p q r s t u v w x y z { | } ~                            ! " # $ % & ' ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ? 
@ A B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \ ] ^ _ ` a b c d e f g h i j k l m n o p q r s t u v w x y z { | } ~    Root Entry F`Cr Data  B WordDocument&ObjectPool _q`Cr_1038650696 @96>@) _q _qOle CompObjuObjInfo  @96>@)#Microsoft Photo Editor 3.0 PictureMSPhotoEditor MSPhotoEd.39q @96>@)#Microsoft Photo Editor 3.0 PictureMSPhotoEditor MSPhotoEd.39qCONTENTS cCONTENTSV30_1038650810 @96>@) _q _qOle       !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~[`@ @ @@@@@``@``@@@@ @ @ @ @@ @ @ ` `@ ` ` @ @ @ @ @@@@@@ @ @@ @ @@@@@@@@@@`@`@@`@`@@@@@@@@@@@@@@@@@@@@``@``` ` @` ` `@`@@`@`@````@``````@````@````@````@``@ @ @@@@@``@``@@@@@ @ @@@@@``@``@@@@@ @ @@@@@``@``@@@@@ @ @@@@@``@``@@@@ %CompObj uObjInfoCONTENTS \CONTENTSV30 Oh+'00 HT p |  NASA Software Safety GuidebookNASA-GB-1740.13NASA Glenn Research CenteroASAASA Normal.dotRFamilie Sleeckx      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^`abcdefghijklmnopqrstuvwxyz{|}~M`U$$U$$IIUIImmUmmUUU۪U$$U$$$$$$U$$$$$I$IU$I$I$m$mU$m$m$$U$$$$U$$$$U$۪$$$U$$IIUIII$I$UI$I$IIIIUIIIIImImUImImIIUIIIIUIIIIUI۪IIIUIImmUmmm$m$Um$m$mImIUmImImmmmUmmmmmmUmmmmUmmmmUm۪mmmUmmU$$U$$IIUIImmUmmUUU۪UU$$U$$IIUIImmUmmUUU۪UU$$U$$IIUIImmUmmےےUےے۶۶U۶۶U۪UU$$U$$IIUIImmUmmUUU۪U0$J  C A? "b*$ϛ0>:XuId0e$ Xn#ϛ0>:XuId0ePNG  IHDR[sRGBPLTE@ @ @@@@@``@``@@@@ @ @ @ @@ @ @ ` `@ ` ` @ @ @ @ @@@@@@ @ @@ @ @@@@@@@@@@`@`@@`@`@@@@@@@@@@@@@@@@@@@@``@``` ` @` ` `@`@@`@`@````@``````@````@````@````@``@ @ @@@@@``@``@@@@@ @ @@@@@``@``@@@@@ @ @@@@@``@``@@@@@ @ @@@@@``@``@@@@0 pHYsttfx IDATx^*@{[xܩ(t{݉Q߳5l!x>]O!t <]デ5t]>s}D. dwt kzeB[=jUpcH"wzhL]VqK+ȄtCaa/6BSt T6AuVΓKFc o?D9ֲmRLn~z7@]r`ULTbVl]άd`'Th $z ! 
RR72r6- |_3P%.r:׮[W<^l=񥌢٨D|fȲrgJHB&F ( Lw-d`nJަؙ("ByꂅZuؗEFadLR =2J Cj\b[ap)Z9^{G[S<|-¥{> W.ԍ8YT%^T(SV])U4_|j:.jAkJ%ICj,#p1Y&.oLtv/BGi&f7a\|APA\ +9\yB)WE\!X 2nr` W {.ȫ첉er 9:+{D'010ӭT{·Af&ɬ 1T?`/?'H(wiBWn.b٫szĴs`%EQƁz ~Q*A[GI^)Y#8@P!aN1+j9_inݨ.F (֋xL=OA1S<vT"A٨ԃb, ۥkdk'cu9La-`u`ಳXx8YUPGyeIࢵ_Wrd9:9K͈dWMx.Ge:`2w/3KHҨ_] K^`ug&˳"GK-\ \ܟXej]kuf$]$S`)0J ˬӯ9$p@ˠU\@,5ׁ0ȉXb RZV4y)}5dG1CN)'X ][AOݧLW|d^P՜.4x4X@ G9u)uK_k|~ AIaF;h:^<>iOv]K<>hMdiTd2aZ_{˿)xM𿄎>>`"kj8A)S +l79Q40 V_ή:Cs95κh^L]IП CJP$g ԭKM}8荣 tɤ{pǑǼʆu .zULuq:>Et<C0LgrUaxa4U.{i3NPiA?Q>l :AJ>؂O_c3Lh>|ӷ] t(4 3Ű7r&sc k|>x^ cS9|4M4>!@?>C#rd`o{gPOUG4!с/X+/jS2Zdu׸?ضQW!<{[dx!iBVo~;!KCз?%!hi8ok@[ڼeyndynP-JEoi39C-zQ9ߢE$f->gh|nj_cf~7f[48 aK_>*dg보cvwk [T)e wtqXd/G4SIpAv&V_ lnԴ : y)hGH@*Tw.f7dGyéIry5R'᫴y>Aj0g3:Zo]9spQ9j'Hߨ̜+f`xRwyPs<-R?Լi ue^Ňު_4=|A:OPyEl?d1Q"cƀ\BA$)yqP9Aŋe@QTEIl6Do;D)ȰEr~Xģv=2 PsL ,Uwti%j.~5bmP+}:A}&h=g- S h+q)`.;"dܰceFV{;Sݵd;B FI{Zr{-a/:g`M [̣}?V JQ1}(ؿk}߶?TF%k}~,6Wy`*ٝy'17[_E3 Fs#Mh5'PН+,r' ܡk!R]b㊶9eH]f'c=P3Aܠ ! v7 ch}0:h+:ȸhjٌBd3)ހ~D2:6Q`嚹r* 3ME0^CGJSbA>L|>JTr`R1=1'~2sl)o||2hWF[x~AxǗ(PfsYA(2=8HJG[Iz2mm<݂{ g nkۢS?bivA1 <)|&=>_&ú޽Cp5^!貲TAjG`jb *3({FIz ݌PqVSn?_|Ʋz\%! ::`!l@Q`t.s|0`0xq /v @0Z3GLߛ6;څk;^Eث㽵/hV)kh'.PcjPAM]=#0N:qSCp̴*8\dFх5ǧ0. \4N"{C]oy Yr|`(-Me%.y /j>^j̫f̟OeR}׭ͳH'NcjtXkV4Wz"03hCeAjMc5EQ23Q_Lв 40:hNa%C[ѷ${|R=VcK>hVnB8僨E8\@˄F}08Uޢyv%Y3bB#ɊL.ȝ֡GCjG1El ' ]&G%nQT9B.%s+j֎jp(b8O#,ڑ*ף8IiC]wrHPw s!>XY :P:))c1Ac.s4 i%2#ȫd /6B?MV[}Z=SdЯ߬4U t] T8p*Eq A$XN&FcSbY xeowZV>kHZmOWL˒6__mmho##X%n@-`56E~pG\y(KKת5~32vy. Q77Vdi5Gr&.Klb{l\lA D_iR,kE8`醋bd߯a%;xyҨAh-l!X(ykc^hQmdtI,=fZ &(`IT"z*kHJ |Koo"(_wՌJGuhCP,P *nf t }؄g{l>idI'ӝB)!Se' 8"o0†ˆ"|P1r ̰~/lluXq^<z;k_Nvv.䦻 .TTS<H`3ldKM2.qȕ2K!/ꋢ>bV@qp |Ċ,n˚xh^s%&'up5 F@XG6|`4RTr^ZKA8c3_˄ecKUs0OpZc߃rGR&4(~ANl<4qȏ&8.bi-WL=CVA-F cu&Cf#N 6DiYL|;kf7dB{yVvd |Neaȼ褙F൮6HplraSsホuq ~V,ꫣ /e&[Nup%w+RBW(ޯSݤ|RJ+M#ii,#rR^QVh6H-cLq4G㱝P EGԹ5' |MɁ[IEQaVAѧ.nV/Rew XhaAu|,@Z6i'gv,I~WF``D%g(wo O'fOXf`i i)}̓:B*NHBlɂ%1* lFE~-Pg;ZΌfu9 -ʣqtlneTl@ISo#{Pyؤ {,&!)r 4 p IQ2DI_yrLXD(ʗ aWȩhW Au?11l#>! 
+'fV.r9>X8I%x\~ral 8!'ςE ‡ bra G~i7ȅCp\ƞ{rY1Z NG>xUKy)ݾ]BAZvuS.BxS[::!yQ9FmZqJcy2郪M+ ԼMHeIZfJ'.Y^5ҹz$A ҒU5 pt% COc=I B5Ma[ZĕeS?t5>`+>+ ` _qzɌОZDŰU^UHފoå\x>]o-MI껵2k jqP.l0f r$b)Gj:\x`e{r F8cS3[zʅ`IY܏ IŔT-'_fd(!.2IfWeKJhn8*3O.ͦuijNWec\T |YKBk0_\,&')6Fj3OҾ[kCمJ~|z跪CD==X*3^S|CiA2U¦)be*!kރsV}`@Z+m0fWk^]Js8dB}P=z\;2C} ,0ԣ78<>Qc5VJY jΩ$aӀ92%kqw{Z~wyw{N&hwkI_Mh^aO>`X]Eӑ5tO?>x>]O!t <]デ5t@IENDB`%Dd J  C A? "b$u=4Fe$|n$u=4FePNG  IHDRM@sRGBPLTEU$$U$$IIUIImmUmmUUU۪U$$U$$$$$$U$$$$$I$IU$I$I$m$mU$m$m$$U$$$$U$$$$U$۪$$$U$$IIUIII$I$UI$I$IIIIUIIIIImImUImImIIUIIIIUIIIIUI۪IIIUIImmUmmm$m$Um$m$mImIUmImImmmmUmmmmmmUmmmmUmmmmUm۪mmmUmmU$$U$$IIUIImmUmmUUU۪UU$$U$$IIUIImmUmmUUU۪UU$$U$$IIUIImmUmmےےUےے۶۶U۶۶U۪UU$$U$$IIUIImmUmmUUU۪Un. pHYstu}.9!XIDATx^흉* L.FPT*sS._i5s=&=-%Si[r=&=-%SLHIѓW1:|-?Qͭ( : TLg IQ6j8e\h,?>[>gKL(֠&\y>HRWE>P*>rd1zx,1=H*3ӡ]CL3/u8M_Iy@Y1(S$3i.Rϕ&`_Ǣ'M~C7X8Lg@q9.N6NrLk랠@JtgNs3uh 4IQsؼ#QLMϭ[a:r X^$ 4yh: ݱ;|\3o &O—;[^=W 5H"EBMJOtG| ״tg/+4}ٻ4V9M_E|`iOiڧzv}#rit߈K󢕞?Lj}ڣ=ϻ [NnYi@I &6r>ө iRjir&=N  =~V.ǦhJ~IC# b 0iYNeTa+NA(U4yNJq wg89MB f3OdB'*'YI K&7T ,*TtӬi0np&#\!uJWO-=Z0d2!wMt2[e:K;MUR/͛&6=:b3Gí݈!&QY>fbMN ۻӤQ"v&V%߯wWi[r1`#YxWLx-gtxi-)QirptZxF:D"4 *&&=&=-I]{sՇjij)$܄{VM [3 I 8" fưސ=iz]ߣ4~&uMHǷ&3:NP9Z4 r|=^T"x"!|[ro2!4ݏ#p]~(A >%Jjipchü?#Q]DJxpPOv!Z[yS<0 ή '̅/a`t(XRO. fjAbLcIx|*"a 1LBW8b^uxtxAMT2τsPaKch‹bFVX4SAgn#P"al 1r$hR8LɖQxar\)Y8j$%e6iJ_\XضP{tjbo4PP(޷{t@a5޷{tX((Z=:M X=~&3_c1YiT&kH(&lv0SJj, ۝D2L49M~nPwLcS'ޅ3q}wք<6uc:N4MUg:=yM{r=8ӄ6iʾ?6=2 q2R9rq2kDj NlJg"n&oJ0LmCJӿ*ZZULmK |i9׈1j[4e,E,hz0PLmlJÜȃH]dx#2GS~~ bRs Ҕ75Y_r6af¡b@V.ZA|*w/MxegHRnrBG [0i*^&|#b˄6hjϧhH+s+B _5W$W)t q0z3MMƕQ. 
Gb4%5,EV(ʭjTX}@ _'XTR+VhԴXlЗ  =MbF-m<~_-l}HA1<7}&<9J)O-jYGļU3a!TeB]r᡼iH-tML:ÿdY=.m(U[PG{lgRrNaOi5O2jӗ 8MUl' MtP Gx5aشnFs N$tN M[ҩ&l79 8uL i °wNSM=~ӫNNSEk3c:N0&gEiW`ip{\`n2o<Ö^h<6)H&TВ yl]:MuUhWL)ɛl2ʜ&[)^X2?(4཈SM<%>SiCS ~cjjŸkm&#ɄNSaքg8i*p '&;M~& ۷NS6qKliyw8p^iM.0 nx2aѱ\O(g֓i.hԃi29M[&ߟ mjr 'q CM@I֌JS BS\Msgx1YlJl3V0acS _+„~3&pr&~0r˅hMD {p p_8u4Mţtɤf4G5J[oc_3JHy}4gc G5fb9Iq c]QrWs 4cic=6E|'',2 8l,G7rkwק ~P@Y&4q5k$24AvI=ӔP.c"ltk:ACJ6/wM_;0!Iy.d+{"4@L@LwP%3 4p̉SY`2t=BϡJHIQa*D$NtU'M!m¹KiJ` A*d;JcD`("B:hf=Qb1T),yŸO\{״c-koA#>3gh*9V֌褵^)1t5VKTčMN4SӒ&cLkR"iq8]ߓ<0wuzM'ZxBzYύ N)D ߴ;+Ja]0fr 0X4y'ONC:&‰6  ] ♕:VƝy܏0̸O\\ Ra xF3i ׶.do"L x-Ƹ5G [vUС4 IO7{M Ot,MI} )Ds1m::=a0깱3Mz躇ߛÓijNvd' P}hW}K5iFstQU*\ĭm`0YFΎMp]g-Ϛx*8p4;MELFSBXoa4LNӖێZarts/4١)O4)o4Yre{&t.Vl?W-X5qxlJn#ir:-Ts&IOIOKMNӤӤ&iSic4)P %Jݨ^ݣMCvWoobaLG$Ǣ6|NؔߖJ„iJ52UuM@3]pw|(Z<%54-xU<6ٚޛAHN4O^ir&=Ѵb`44MiZy4]r 0|7h`uAizI.0.i8i;:M3[zll;zl:(6[I>A&Dw+.cN?.uA{Ԋ3C>w7)%o0Jx ܫ}߂#hN7(gzoWz&."M[o)L7>d%+3~^X@S&E&$U KcIhT/ԟTtZO_9)ܧoVw ifO#*߂&?ar~_izd/[4qij8M-J~ &.-r}E oĥUn/[4(~ߗ-.M˿nZ9.m ۿ e۝{NS'Tir  s1{zq =6yl"ǦK n+[dkkQ+irRxGMAͱ*&[iqQ.T)l;!Mbe*kXg Ħ$(vGZlڴЉEg5)sI{ypwkst( N13M_tUh?Bn)ucBOmp*g4b_ʝuQE_`hcj]MЇcSir[zGb&4EyFj rڽ7MTYXRY90Ob"WFاT6&yv,{ j" EWu)=^`CGx~Cl%K~<6ɺxMp߫"=NcteaK͘q6tx3!MLlp`5Bh4ft Dդ5>ғs+ЭAe r."Z1qtB9xW j$oڼ2G7Ҽľ.V ] u cP ge X3h10#]Pcle(YfӴ$6%d"R|YXȝhʂj`$ЂiR"##Ż i>M 8,&U>("!1A,k[ p^t%S0R^qh*ޜ/xW@>ԣ{O3ik\Nq45CYjic~?ں OtIw'ØZa @Y#Fi krRIf-4*>$ F wTT/BRx;ge08r9:(o-;ÕXJW._pVwrT)84yM.6/Y=M+!o4^qT,<, ht8 NK1ŕE qQmay4=ϒOa鯟fFS@j]Ym$VIjZl I']WSGICqzʼ=N2+{*M N%b]hQd)}<;LJ0#Lp5ÕI  ^0#D;a'>ޏ΀)E4,~ca'%)߸8CRL  )09<ZߛKbbsB&ef@8\Bfv'Ǧ $_!t]z;@Χ5y8MiDEy v"ӸktT¶Z=>Biڸcsp:Kn[ʷ^X<6pZz?,i*DuUi 5;Ksw1aQ,y~+wJWZiCvdkEy].-9A{&}(/kc'-BEnF₲&M1y (պ\rt)Z57H@%GI֠zy\ Ʃ{òvVE7Tg\j݌C & "Md s `1En!BHShHQʵ7aSzZ8M0ZlDB(#VpƝ##NWQ9"x^h+*&F&yP)ĝ{*E9}:}hJf8 V!lq ficх]JMi!:@ELVh0DЗ!ԣ)" lK&stQift3]7޻YR)M?i%X% .Bsi}& XǒӅu1%m8MQxP͛^}yL yNgotxp sx x50hJ:N`41SQj7t%BB9(4L4]rWWf;݇ KAܷЍɏ]NSԹo˕֞I޴8#hiOó& OEHh&4? 
jCRlz NSt)ܐwud3KoNebS1>Bl1QvuLH|B$6,ʝSzSa֋hjJ$RkZVriz | P~ J? G-`^۩tx"~Oi`Z4](v :3n sw$0U-3۽KJc->M"$! mȅJˤgw#}-$ o>|L5pR$M٠Mr"Bw^in ~alo-oZ0C&|dUI}?JɄĤ dݓ'IiZ=Ұ4L(!R+4iȦƗc8 6/aA[E4NG,z΢MBWUx,$>ױ|x96Ihz1ɢ,ħ0~J6GX6qehZmpmY_3VYAY]7VY~s㎎V65t`l8f- 2,<ܗE[ēEC\>u"fCJFz̧!aXIk_kˊylNV4q*eſ<6MZWV/+ylҺ8M\}Y/cmi*Uˊyln;NW)r_Vcؤup8MC*s]>M$AJ&;Mlȫs]6^tIENDB`DyK _C.4.8_Example_forms$$If!vh5*5N #v*#vN :V l05*5N / 4a$$If!vh5*5N #v*#vN :V l05*5N /  / / /  4a$$If!vh5*5N #v*#vN :V l05*5N /  / / 4a$$If!vh5*5N #v*#vN :V l05*5N /  / / 4a$$If!vh5*5N #v*#vN :V l05*5N /  / / 4a$$If!vh5*5N #v*#vN :V l05*5N / / / /  4a$$If!vh5>5;5R55 5#v>#v;#vR#v#v #v:V l05>5;5R55 54$$If!vh5>5;5R55 5#v>#v;#vR#v#v #v:V l05>5;5R55 54$$If!vh5>5;5R55 5#v>#v;#vR#v#v #v:V l05>5;5R55 54$$If!vh5>5;5R55 5#v>#v;#vR#v#v #v:V l05>5;5R55 54$$If!vh5>5;5R55 5#v>#v;#vR#v#v #v:V l05>5;5R55 54$$If!vh5>5;5R55 5#v>#v;#vR#v#v #v:V l05>5;5R55 54$$If!vh5>5;5R55 5#v>#v;#vR#v#v #v:V l05>5;5R55 54$$If!vh5>5;5R55 5#v>#v;#vR#v#v #v:V l05>5;5R55 54$$If!vh5>5;5R55 5#v>#v;#vR#v#v #v:V l05>5;5R55 54$$If!vh5>5;5R55 5#v>#v;#vR#v#v #v:V l05>5;5R55 54$$If!vh5>5;5R55 5#v>#v;#vR#v#v #v:V l05>5;5R55 54$$If!vh5>5;5R55 5#v>#v;#vR#v#v #v:V l05>5;5R55 54$$If!vh5>5;5R55 5#v>#v;#vR#v#v #v:V l05>5;5R55 54$$If!vh5>5;5R55 5#v>#v;#vR#v#v #v:V l05>5;5R55 54$$If!vh5>5;5R55 5#v>#v;#vR#v#v #v:V l05>5;5R55 54!$$If!vh5H55W5555[5=#vH#v#vW#v#v#v#v[#v=:V H05H55W5555[5=44 HT!$$If!vh5H55W5555[5=#vH#v#vW#v#v#v#v[#v=:V H05H55W5555[5=44 HT!$$If!vh5H55W5555[5=#vH#v#vW#v#v#v#v[#v=:V H05H55W5555[5=44 HT!$$If!vh5H55W5555[5=#vH#v#vW#v#v#v#v[#v=:V H05H55W5555[5=44 HT!$$If!vh5H55W5555[5=#vH#v#vW#v#v#v#v[#v=:V H05H55W5555[5=44 HT!$$If!vh5H55W5555[5=#vH#v#vW#v#v#v#v[#v=:V H05H55W5555[5=44 HT!$$If!vh5H55W5555[5=#vH#v#vW#v#v#v#v[#v=:V H05H55W5555[5=44 HT!$$If!vh5H55W5555[5=#vH#v#vW#v#v#v#v[#v=:V H05H55W5555[5=44 HT!$$If!vh5H55W5555[5=#vH#v#vW#v#v#v#v[#v=:V H05H55W5555[5=44 HT!$$If!vh5H55W5555[5=#vH#v#vW#v#v#v#v[#v=:V H05H55W5555[5=44 HT!$$If!vh5H55W5555[5=#vH#v#vW#v#v#v#v[#v=:V 
H05H55W5555[5=44 HT.$$If!vh5Z5r54555#vZ#vr#v4#v#v#v:V l05Z5r54555/ / / / / 4$$If!vh5Z5r54555#vZ#vr#v4#v#v#v:V l05Z5r54555/ 4$$If!vh5Z5r54555#vZ#vr#v4#v#v#v:V l05Z5r545554$$If!vh5Z5r54555#vZ#vr#v4#v#v#v:V l05Z5r545554$$If!vh5Z5r54555#vZ#vr#v4#v#v#v:V l05Z5r545554$$If!vh5Z5r54555#vZ#vr#v4#v#v#v:V l05Z5r545554$$If!vh5Z5r54555#vZ#vr#v4#v#v#v:V l05Z5r545554$$If!vh5Z5r54555#vZ#vr#v4#v#v#v:V l05Z5r545554$$If!vh5Z5r54555#vZ#vr#v4#v#v#v:V l05Z5r545554$$If!vh5Z5r54555#vZ#vr#v4#v#v#v:V l05Z5r545554$$If!vh5Z5r54555#vZ#vr#v4#v#v#v:V l05Z5r545554.$$If!vh5Z5r54555#vZ#vr#v4#v#v#v:V l05Z5r54555/ / / / / 4$$If!vh5Z5r54555#vZ#vr#v4#v#v#v:V l05Z5r54555/ 4$$If!vh5Z5r54555#vZ#vr#v4#v#v#v:V l05Z5r545554$$If!vh5Z5r54555#vZ#vr#v4#v#v#v:V l05Z5r545554$$If!vh5Z5r54555#vZ#vr#v4#v#v#v:V l05Z5r545554$$If!vh5Z5r54555#vZ#vr#v4#v#v#v:V l05Z5r545554$$If!vh5Z5r54555#vZ#vr#v4#v#v#v:V l05Z5r545554$$If!vh5Z5r54555#vZ#vr#v4#v#v#v:V l05Z5r545554$$If!vh5Z5r54555#vZ#vr#v4#v#v#v:V l05Z5r545554$$If!vh5Z5r54555#vZ#vr#v4#v#v#v:V l05Z5r545554$$If!vh5Z5r54555#vZ#vr#v4#v#v#v:V l05Z5r545554Dd f/=D<  C AbpƕYvgnLwnDƕYvgnPNG  IHDR SgAMAPLTEٟ pHYs.#.#x?v IDATxͯ=Ozw(!(H )F, ?Hv,6# VG0;F=x H,`U$=}8OV}G}9UݟS[qD,傧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧 B.x )䂧1?og|/)Bxma-|9"Hi~/%x {\,?ӗ5_eG:‡翰H_G,UR†׬zjm^OmKkDŽg Κfۚ < ^ ~}/[H뿫ǂkb7,xf;筻Y󓇟ŕD{Q֏3l֋/;y pLWJ x& ɋdk: xm¦6r5}!_8h_=C%fdv\;WcՋC%txi'V}ͦGJnkK0:u~5/9TB!2-xEK~W=9dz 6=l 8WolNQ Pjx Ko7IנHF[*<:")kz oD18K:BO$(-U,eԼ%«)&0Qg iON"K- Gyy"?sPBh:/nTF൤ˆ,"z-gYar4}1H<"&Hu/oI\SZHQKIWrVH-*@/HZj_ɠ ¼RFG49,|%AGi`J -sh(YE`9BGMGJ| <}Uisaj%_9nBhy'ŨvJ[HHg* eVon }^4$0dp!3.I|U_M&[} ^boLh/bn ž Gi54Fcy+EO2M $Ҥ*"茂=ZaiWEQ'5Ӯ/LFE =X^} _yQ7:KD>=2+ɓKD9n' YNk/  9zLE: qn|;gE7,`n2EV 
43rIL. ̄\~OY B:O̹_WAb|2gt૤Yt9SY% 4m||4I=޿SgAqKVSsܕQRid5%'Ö;RF!t/9OmK^re`rb1F!S{IxX&<~ي$UR ף!F~:ICzKNSެ{|C<>fZyD9 KI!b$Iۃq.^ 3HS:;>; f<;HS2; n{r|Gŀ'G# )Gpr|}KZSME&.koK nʘ+U3nWDڕ(*O-5$T2QjQxAfr(O nU17|S8<@N,ϩ#Q:53ݩ+%Sά8ά:g OTTs3맦UnmjNgN USϋ^p2ETCx4Zgw{4nBw+{P7qjN{Pg=/ ]˩9(ū:xoC ~*Q4.F8u ^3{6 %e_^ ާś;uߏ@YЋս%{xCxu@nLQEU x 5Y~|8jF}3*1xKQ &x{MG5F4ҪCIkYVu? ^ly^9Fs@_c,M{?3sRxg{t݌:$ϵic?ͥ,ᕏ)=EP`Tw3arDw[*`vdJ@0 ?OjZn/=`Ogpxgb?5-فmfANa fg[6/]#<"]y _E+>}k:c$ ﶸ&^yʚ;3GׄGq//\@̈kyAx4Yg 3+W|xP>nό*{]v-t$Y<($B%û^ Z[wO;1 }6o}xT܀,qB^vݖRYXM<ӧ=dKgKv%u(^G(f$o=/}xd nmx`Ѕ )~'k>a0( M?=B<,+uN5\c oے:=0!#(&x- ^?ënUJ5܁[^_öxk9Hb6ԸeSO 3WJ4S53%^ABF7û7ix6A'l8M?kyhëfx(L }ZmqCǧ+kO 7!br#5>.늻O0mu_l2(47wSÀD^7-?̻[)#] rWW.Z^m^wUU!1W fm_m7cs M$Bb{?CZyg\xfwGQU*9nĶ~wqѲ^?Px X[[FH><+z>|ExCC# ~zx͡|j۞7|Fga>teJjv^]qtM8v !ٖVo Qx͛O"V^Տ`=ܘ>.5!|Uc~')YUU?Fx6kmf| ?~P![b^y[5 ྂ7HׅlocsL^xf୺5cH:_&o[ӄ/̷q]/)=0Q?-mJQ}M;oQ<~48(|=<ȏWg"i:,$B&y5xmDEJ?4x3K= 1zx^2kX|v&?_^}zFD:1W7Pݗg'w2S}Nq$sJﵶKWNw{-xujȞ x~0& J΄W<hOgx%x5|@(^%zIL2{'cIxjyR[ʰ^ޖ/623۾Pˋ 6^Jx_yZ^@e"c_^-+=9awjy_yeY^鉷C(ؽs䁗# ӭڦ^ɒ7Hl 6o㾶 S"  xiAeآZi*1Foz<:/1y*cd 7^ti!n;1O݂ *o)^o<F01y~0d#Aٞ&Qd1"T(^}HG*EµyejydxYCȕU&d/⅜ /DD 4Oq HSBZxbxςLl&EK}%Z0;9Ķ<]A/[h ^R> `WYF1[/Lp(.xY](GQ95x ~R[xh`K <Nufa&xUq$<I<٣E.Ks7+/ lF.C['VZVza]^#d1xpgP1 Vxm x#RQxB`DV܏O0mxjr CU!5d}RGt=`u FZI=(< mhx&gl*Bp#dhn م- x3/KC.cwC l[ <ҌYK`)bG CY۾=/Yy^B\w [ic{0P~]  wX&?`Ubj>d+;x~=O>2ϞNJ ςgA-|( +YÖWyT_K@&y R-'F/lx Q;n/+-ޒO! 
0FqxJ)M7c GzP&k/a&CD>RMK*$,ѻ#@,c0gP /W`nqxQqxi1Hie~fHm"O93gJiAh=xz꨷"cњ~ex,yxSRO"PEZ8ԣtxʊoqOצi^~Sтc=<ʏGg~:n\x!ӟSDfLitr∴yxލpJy:O %ޯ#M<:rHNLO %SґE-#6)5NSY#:e6j6k1g F3jܡ.LU0Ixd0F'i[z-vOӔNח:xg+4R^2l @_nKS 7GOcq0!x]Z$ GQEOxPS}_ᱟl*R)n>?6<ٸ8 B2P'2Բ p 2,'ٱ?YѯU qղIB/hIix"p&9)x.]U-*o)-yZD y<u8>c5Mf@u#<}`D!'ay-Vj?X-Wi>XBްg !';!kZ_#u# >+ Ob0CyfU綿> Ob0&BIb0<3 AxXO8TV3,4aw{&FZ+?L dx{(nIaQLɼd* c+KhᕒjJpAxuw=Dg !\f#m P<=c0i|0}Z}dh'q4˩axj e\8z -t94`r]h"tvXW@.xك*YjnúzB =AO?ń>vE.xGO`eg1xv%nrCHf6U;HCxi#y>@6%⩻M|lbw^}/&aP,, tmq$#뀪 taVEHRCកaj^ !:n]1\iW6R M49H4Z`xwOSQgbh|dPQt6Ҍsu& eL%x .ht Eֶ)Т+1 lbK/1}+Irɪm9 vߋyu\dn.97̭$_#-z{T*ûGЄHp"Ax~~7ՖnX Pbdp4oF׎C͔C%^2|ܤYM-s؋Ë~BD4م19?.O7Yi7?~u\Ó^<޽۶q9voz^~cJ$޷N#d'"Y+ðkϤd'{P(lź%Z H'US&9]v&PkgrrExt;( r< b,nD-(3£Xƾ56P'DK\O١^7N/ R !fKT}r$g*6W/ł^V7 gh^B s-J*]4!Þ52w+[=wؾ%g߳Knbg f >1-9XyGc4x|2Na yu(D|KfI=em#$ͨ+(Y'Q`|5[: /5'1g:z~Δd7k$NۙI)0eH4.Sm“tJJh^2:OS{zx30dLJBq=7ݟ!ΆjiLS[p 5_h)~:>xO|W P:\ƇoLx @O7T'ޕ4J/9s@ jx8Sݜ gkWm>Pjć'f* @bB;3(o)89[\O%S ceqT-l[kRVQAef+T:0YٵS(j)it֞;nd%v&<<iӋlS tA{Quu&<МPO]RpOVPUU :RO˛.= c;4W֩\, D/ 2/C}~']ԷV߹GwmVzJFJL)㳇yax ~WSF L-fa,O;RpX9Wa۾2ܫϚ('*j)':%nEZdL"(_T)i#fH2"涔+%+\xN9s'dIq7@b5𖯾w=ؓ(:qGEbgWsU`*ِǬQA XWxm.i׀Y 46Mk/Aԅ rs+Gm[J& 2ZO%Txֿe^yʁGL7<ܔU/}zx ^.:͊%F,FtZ?LZoZx ^x#:Hd,<^Fj'a&uDҡ6NPQ/}HUw[t$u꽩 d^rp4ಔ8< d$eZO%{0)x.IPK\=߆ ݝ'o?jl핉/i]%Mɺה?f80n+xA Cn C3}"KRo-);tŭ-|+`~l/uѯI "/9ܔ#,'C5tߡ[Jl{%YBxwsÏ~л+n힫~D#/$Jxg«c}-tc2̊*a x~^-CY궷 %_KI"|x6xS3^m6en/^(cW]v^_MMngs$93ua[ F?s(~<76iYfjl>ޛ^(ѫ)[jl/U\%禵ZE^06Gx{duXBؾʤ&û̼emS!^"斥EQ T]Ffc_J9Դq3YGxvkjѬv NN+L]\WC\;)Tx-O慹e{ogskty7#-`=s2%bj{9qlws +ڷ tΑmTUf%ɃW*-0t8! 7&ͫYg1F?gN"Ւ+އk+~giUyn\7W=-2{c߯3EkaUf@oWk)xn|:x}r=e=sdYg ܲ+~&~|.rj23݂,+[+xK4E%*enEoex^~Òro;L`Kd%lwp4Qx+O[0a_8-YϻAE~mm~Z[V?o yR%ZUP9~M_~&GO# ﶸElf=Q"to~<-r"]!^pQSIa,=) gy[Jdbϊe\}4WW)^bp%<ʖrnURMز' 0 C IDAT o.ENxx2F&୸g')Zx1+0qRFXB-ʁ7 OzxenO|kS/Wz0usҨJo׳_x7۩Jolx->x 7OQry0ÛF#{r6^ ;ݝplˮ_ o.S|̱? 
@\91:XKܼ@(،{5~pBYESQX|:љ1:³$-uO|s^GWܧquϒTz'#gJƭQ)N9% t˴<\,Zm n#{89?,o5g}n+mNeZOq`ozi!:Y/`ϔ2ûP4ڍ ~k/2_fʰk3&xfK9;T6S2i O3f/0L΂W-2vM*< $c[ohJC)*LWOv⪳ o\.zxRƜ$-&ǀ3'uPb$oR/򜄞nZ)<R08OZxe,3R$ xEQx8X߆35?hקᝓ; me3g|(Ka - i9.ͭZ2+#EKzkĴ}?$Ta //ª1xnp(!['6ކԏb &YZmx9ކ(oEiy b +inp9/o! }QA3W}1׼O#nY s,<7ݢpp8 3@?9)`6A%/ A%;eong޻M>dvN{+%*~J;exxɫ|lXxsCJʝg}1x_Z*v)3_)7#btx1=(3nm(<{ߣqGe$L9+º ^( hn{ʨ7m7f4]KP5>\}JM9+*xRfR[NBrS zGY0o]=bݴ_޾S$o R ;XxGoAP8<&"bWroaڵc«~\xGo]-^GuX$9ulGo ڷ^U6~5rN>:Q txؾff^ ƬuB^S+2T U6S^Cc*~hx QKx\_eJ_-)xy)x6ixCaaCx_:zۖJ4Eg!Y LՑ\坠̼:V/Jo3?V%g=,J/*;^F2a9jxT 8g3NkCw6ᅭ.83Vz[zxanA.S4~WmN5e=qb` ^:oІu馗M`P/ߙ23Z]Eǭ:RQUYൺYv$- -ƀ[F>x:휓H ëRw~ʐF5<6o3ƒmIk4GFo4xдܬ#j%F OzزW o 1V oa9.}}tҩb ӤQJ/?XPk*qBsXD;굼_D[2KxcyJ27N bZx,uNx3o"RZ ϬZ%B ZFoS6FV [ 7ɁJTbDK?:^iAxeA5<єӂ /^RqSQbl;Cz[^ё2Y)oRZ!]lj"^ËpiN o]`rr%Zle~s !-ް_p<Ɣ˾o8r%~ /FNLI}PpJVmz [ %<}Rn"[ 6,؉39T΃g*=B.D fnT.4f*=t\q+9^d;I ݽSQ&)^ln._ SzJP« zw)=J&==<T)/ڊ* lRz{7 /z* 8:,GC8]JLA GQS߶uP+ozE5 `xz}fEo/Euxb(=~%w_.n%;'9ek1gQjz#$&(\>M8x!G}0MN %N%Lx󏩏Ű\3oOUz9b?!@p¼"16T1f'=C9{,Eљ8>CN&NopT־0>CDZqτ^7t%Db`<؞(>9ΓJowf'. _E P!S%[⭄bB/TI'`{  Ryac'!V ~rYt& 1){WZ"ʥPzOe't"N x*KN}`xa8V8X0 pUzbZkKx0=5* ePŒmѴ"4S5^)6 L*ŋ˩ިPzb`NBdEt&h JX3/?^*xP0׈(#T3_0>fvxЖ?]]OgĦ1QWa&@|}&6mcfq 9cKK)L[*`ʅkyz : N0c*n"o~WjskDVӠ#nDgQTRwɪk)g;{6N1,iVhe茷+ָd䤨UEE[Đ//yKCUƝ?f*<]D]/Vc\QTnNkHW`w$4)X0MnWxwmZ/ҝ6{ L yeL7T^LxZ/L%v&[UN/lGj{ jQN9lKnǓjzjfBp7yxʤ1#?ZΤEgk'yB[+@K1I*&grd){{*j6\Wwވbn[c!)m_ m]Y6n̢~iܯyJໆ1\DxZm(>󄶮^ k9@Y#OhkKFa1*l xx Sa׀Z=bi2%5Xuq:oOv< ,ؓWax4ȣ׉8QoYqgѥ#,}T0Ti/l_[fx3+_<"ӝg$~L7LodFC1$U1L 5D>^%S/x<rz>hQd=32FJφw_&&oG- s׆!&; ɨy%D7ޚJ.o")j+ |ԋ#pCn\%{Lx#RxwuMj8boxC0ϫt$xi"{ϮF=]́WRq^xծE֙.| IDATQ{_Qx -5b6{g$xY-ư+ob 1ާ ޶m1l)&$SBxт+:ojlLހL s/WgfNnocO}z%fAlxF{}oPuvk{_ +}ftFLS?hm-݃ gK'pY:4ڎIFbx;Ϥ>mV @^7>~nφU9^ +.2}!38GCFJ_n>Xmh9<(j]>`<m4W~6MYr˜JR9ڃmazlXV_g$xPUgTBŶ<-+Ā ADxMfx7Zi hQh?wkoeWLǏorC<xMkNo^g>O\J(~ R޶x*QxRN[[5IK;rm]JvO%! 
1' 07rK <ٴ M'{+]'=l F7xxs;ۘag#J;=^ xb0fDGgXB};W-?3c#JkZ'+?yhT1bTi[Ynp#- #Ĉ&xyŲc^sV P#M¯#n#Gp?Sʹvp>;O"`լ^su?@sI׹GfLB&^[35[al@<,3SEZX&|2\~mp#mtE ;wU@{U]#sppJpx#76oC<8QP9%>,K(h{E[/ [x0O2' y7@{./xƕSh0O^o1̐F9Ān Nۃk)zoV ׺ěۅ2ڠ{YKpQG1`x~Fw$<ӟR/[~I%6~-PBށrNOr$…GU.'7ov|%#xHf+J|wCxxO:uF& x%)5]&  ZC"3_tS~GČ抌8XӔ9?DzS;T*%:xv7%)1Ĉ #S QxFVI";Ã- ~- $u£X^FѼgL9jK&lxRNOQN"(U?m-FkmP(791x%<d[Ca fHI<$x5ϕKF@G 1L}XRZ_+kzsw|^5I3]q.&JB7roC")xtMX IӔ/^)uO*1, k1o2X&#&#zSIxQ?bԀQvˆ{Є#0$lW9&_ !xS8Ⱦ;Hz}px|}tûa0àW$|} H8 $@-pL}X$roL6YQxBSr<˽%b(ILg@b,ILMI,{8a@ 7y&b OT%#8<40%Q"w:?k1L|f HS! ;Jl]@8GE+CC ]@$YKnM *EG1$9GQ .,xqĜ^2zZd>LDbDnN/ݤ ]>5 ^2za!tA4-]SKBz1aʚ+%W#G D)d n#3F2f~wegw^l-`N4%7H8<ʙe <)30Y^ -:!2hV=8<ʁ L|1Pxs}R\i%62hh{7qtABʀw {ҷމ'J/Of`@3$O.S"eS <]b$SZ/=-;iLMHt/Sf`@3^23oxɷ8n"Sf`@C[gJ!䇗'302.bVTX0 }_KS%"oۋ5!:«X' M`g!AxwL ݢtZxozT| ,EDqN1GdL{}4}ޟ[0x ^[^Lō"yr«gx7d&,W7ky^=+p\n+d9?M=DAٖSkx8ʼ?) ˕!78oM/ V\;xns?{_ .I>R#o-n f3Dv s ۥMϔS]@p=e2{xKVLlCzDO<asD.,ǟuR&x[ 7 zxwvzx03#Ig<дSh=P^ ͏eއ6}rǷ6%35-Sॼd}jOǖTͷX.K`k _yvOon?}hmtDx\K6}ZJE%B $J;Z`_^L%w(B1ًY~FgDWO]\Q4u[5- /1QJ?ELRKxa_xa a"Pp}W;xUHOǩx- 7c=[J|oOI9>Mo"cp*x!LG3-: [4>Pv'U#ߢMS ڃĦGws^w: ^aMSJ-:K1'xxۮ܀:"p)(xWMIg / y~q%V~2jԩdS,;=<{]f&L9L>ÜԡL(mB*c*|,[r%$^Iӑl[x >76.# zD#o%35(+]"g- -;A / ~Ϫ$kطSnOi v<:<'(4&LU#5~vcxȲVYA$B mBϲ3KxH `#"'OXn>g_0.M/GVv]b&Ž#n<#lq# fA?!x!4Y.# 2<|Uk:-EB ݾ}Ekua5~nqF2qcH$8(2'el xuKo-snBat5,W| oPvwaV;r&lj O(s?<5.& !>a*LtN%΃Uy3uK_qX|H ^3>O )SVqw vj%6$nBh𐕧^xe@x"Gx.md}"Ǚڨ7eߙExmmNχk+GGr(&ۗ*3u0t.q^KrSUxQD n\fʚur ,!JSuzfl$P@QyD8G P0ωo߻nv)GjfL#2VX<U[ἴgDqO}bgS%2,ݺi8KDIx<}eFx^HC4oc>{T,?8>fn oq'^G5M٨m,A߅o**7[_lyW67?H\f[qxePErS,E/]r &Rm%zڜsoXd^(-9Y̪ݩN+ 7 ^Ijq]S[ƒGlU+b 7rx r4jI6a1%QO.wK) )wD;SCvK3I:K$43\'L@x5g *k xaZ;$HDgeπ视n+2>I΀'wKiN}x?O~+(.'8Sm $ Ox{F' ^ɖ7RO1kx3c)p Va0d3Ʈ׮]b;z~sB˓YwKx>{O%cg? 
dEn\ZR{MX't[$wEP1Ƹ5{8eӟOTGh= #{ʳd*ž)gli x2mϷ;"xcg-El'Nfu38Ndb>ts*o쳦$ݛ0̼*$y><k\{{<_8vtxM&x҈!+5˧_@#7an7Eχ'VKwxO7]4Rf1b)+7Q5hB1M ;!:hk0ΆYJxqcYKxKW1+t/T奆!S(ZʠK ^#QmvWq60!O8v檮<®NR6y0{C{MFJ<$0]ՎI< R!:+$O:4'ZxF /U/i t69|Q AC IDAT7lU{?;p&̳ivtu5CrPzS{9^Y=^|` )(RaL%}zB@ ǡo{},+h" +6BY@BjO%Y@oXWJ@eϪҚBBfө?I9ɽS>}繉|}}~ 1o;tbkEi-(icxxS> -x1z.!{as)@T)}Z_k<<3"Sˇ<$Nd{^򼬓MeƒcվN,'$ 5j[+cxu>׿-;IEɓ8fYU͂ȟ  fnP)F臆X7R=u & x*,^gަ> <$8l>Jk DImU]I7T02P念jO}קdVgWۏx4(uk4{O&EMbW+c jQb^mPv8.*#+ O O1ާ㕱5imvp\vB׀QxZPjJ%8O@mʣŕxzaZ< O}+ ~!=/~clޤ5j 8TzIyt)C؅o>Px@X}"܅78f=UN'7c`S ;? ^V-:' 3"v* PRr9t 4xW}&eɏ}L2^uk5J!j˧)ErMY@rEf[X'V5\9Ol+ #e9o(i0ܭᭃW?n'xqy ^firz<]p`TzȊ$=[y2<oĥqaTVz.G޳ {#&~C0` 3qx7J7߃HɒE9C<>*Xuw ?igx52<8e}eНpM<$xyGx|ZMxjOOq ^\?<|ڀS޺Oʄ_-]rxC޺D)Ch,-Oݟ-xk_&ZT|G!M*mް Äw|x o6dp|xyxqAcD//aQ]0NxBwcBЖޱ w*R,ax^;T=<-ώ$oڻOTЃ/,𺛯v{UF=/-=LTen_^cT.{$ 5#;x^4_bu xOBօv[52??=t5 Gx45O~PyާitGxb^^Y YdP/jm\ԣ^E BYtAOC%-cAܮj*\%Lj'jJb"ʎއJ[hJ̶P_bux?l]f',x73 O~$Xi q/1@n{3{\fV4·P6xn[wKL<4\p3aGoZ~^[rVxس4wGs1eq2eV0t{z^[^[עںΆ7 6xʁ-%_@9&T>Y,6xbVV)U+g#*"^7[ O8ޤֺ7kT퍥a<&t/mS?TZR3Jn~zݲxL{$u5[Kް3FYCow#ns1>9IW?Mr pVzq*"C Yy߭i]v.dw_~h GlJᙆn㯈ڹLٟFKЛ1|jV`80_f 7zRq[GT$֯]v}D<05sw#S^!o;)^!_]{tN^>N /烯pIiƥ~QGD"vc$ <#ӟ=̹Ox/,G!ճ9+Uz GdjF/_18𔌳^O,%_􈋝xb 1|XG|Pv >|^r~xᛳ5|^뫉,恚KcE~S&bdq>W}JԌ9*t% Ѯc_Yjb>xv. 1MѮc, 5_ %^Nm؜/rv_-'{$s߳X[ޮJ 1J!6:yxټd?&>yxU<: ^./ b2 OKP zI rG1O>Ĉ69j5g[ /]FMwd? \^S#jjI%~/쇗%Gӓt^t?d1Ok"k\*EY-b%khxT4/S /UNѯ,x1stۦj8 ^&/qaCx@Tuq3RREYA贾ZAS5IRFDzN翪9>i'x5H3YCjj}Y֖/%@zx}oo/?~[D/9=Fna$zq#}MQ}>2𺉁^@롦%ъ7lbtc?TDM//ivДg#dq0CB> ^M.5v%!xIq;<5`v_ǯX20.9kQi}.6KEy/:h;~%sfUݫ0uKm M7Ke0x];w ψ2J𒡮~7"tn>7ACdd Pൡ6m8U+ğ{y{ #O݂K_Sho_Zʒ}t!16\Y6w7:HK]qAJ:hfJy +G' g{n.jP 6xI-/5AF^qT_v -`@g?HeqxH/zU;. 
I-WkgA*ݬ}m=¬=tGP|vg ȣWE* 7%GTy,1H  <fQ5WrTbqx-*oI)&ԘV|Zm0nK`.n ɛ/%A58t) ›@?(q:TSїB; ѠoG+A|zE <BVᦹ л%l?n#Jq3 rK* V\NxA0cKW*x@~)jliq} xzx_ U *F:<77DP_là/{߱k 뫠>ĞAqECLz8[ xA۱bwqG$=1>WAECGw,@bE"[_ w ^/!/) SP`5OҤEu*;z-3f#s~sz-pBw^xI[~K"}A|=x0CvaeZVxI;75bK}7S75HX^pSoj(&yO4 / $Jl,Rn%d}$x#]&đTP\٥=9qk`.%m]b_9KD PmJsvFx šKmZ&j6 v7Ư/|D~xe%~-<&B%95"Tr9%0lu1bxhhN1#£ ^v:5ĠKVZ0<2!^  L: ukm~ n*/INx5IH~/IP}ɣ3_  fq_ Qx˃>wJxs1<^@/'<[ƒ 5\/&ql,~5&ctA#‹.kxG װ-jx,';%R1(jxTdT@:̅EFTx%q:3cRx/Yg4x^YZ"9f/0n{Њbڐ}jCgV$(ZK-~X:$Ra *x !6>x>Cz.B71ύH)xN!g^/u@S- .jCb瀗HiRnDjk=Itv]Ie6ГŮ[ ^^bBФtއך}w& {v^/q8lx'Wm)͡ l|+5iLZ=uk78>Ln'2Q>yj$x%C'5^xi!!Dd7NAx O,D/m֔il < rkS ҆r ] ʂHxOOj>Iŀ:;%O`<]G5&KLxQlx=qlx6NnI׫XnOȫ$/)1V<C%΄X7$/j$x>xOx)B5I#vOG UowӁ-"xU hQ Ó 5xc%!%qg\, uw³ 6sY%+ 4P΂w[<5i-oz #ikᙲ[H)}&Q"0Ϋt8`joj}I 1Aнalx ]x2"Ko$v{J>>;51*|9毼ޠ6DG']ڭ f <>l IDAT`Fiݲž@voH7xxכ[{عCL;ۺT}_=-F: xC4U-n$\xr||5jLB|{H>چ7(x*5<; @f| oпwUgFtJlo˒^/?ߤsHMP'p1l1 thڍϤWhx2n tTxƆlPz9 BI ݁wHU x2δSnSX[b b͡TKTFd>tNcXo^*CZo$u_ ]esЎc¿:1P-QK4ӵooHg1`>x_PyۭY]Clm. K mۢ9#}1ݳpzimտ̓MbSN7CA1=rώ_ϒk[a_-Ħ2\ iUx{N7bxRwāO&7wL n EƧ~IxGdUTfDw-Jx`OH8'҂@Wۻ RzW s<߮yIau>-##aP P+yǏ ϛE-Iǎy O_:xV4-M͢{ WOcvxy^`e|Qk<"r%}ؠF[Gί>$>A߾xr=6Z"jR]I 7BC.15oG~ÛHZw!BMkѩ/BQ{-zJUGvw6|wx6 Fz D0ywG`wz:/rFkc}A^͛MoȽFG'Ë-*b2xɱq>ysx,㓢vMOT!9*WK>~7:4Sr1bj]]T*K>-I{2󪖗zfS#)o/塶3 îN|vu[ܹswv[J|o ~YSeM2eo :< |5xIٍrUKkᑧSϚC{KW?AM]Q.{K]iO5x f,~<i jEǧI6x&}kyh^<#b0.rk1) dxiM"7"]d7%&=b.wF@7oIZJA2q <3pgps_ ^?5YVJٮdwG$.(kvn%ՑUk"VfYJnaqK W(t[pٍx u$xhXLG9[`MZDz:*Hx_Կ7ةp`lh]/q\\kW6%}%xa7Oh"AG=0WaK y޷K[(jI6xG=E%z$+EnJn^{T)-Mggaζn1-"s xc L^<فdw7^'K9!yMg Oľ=q~!%E& oeL_<5g7W y\=Q.;@wWY 'F -_/ m7rHx`̴ô+֯6r )Y1\CGr·:B$ڿҕ^x=fh:͋< 5+_OwuL:ё< q p wo<GTC < * NGXWЉ"jK&xdx=qD7WAQ 㼒'My}hT@KK ON ukh1pp032Njԟ6@WuluQ x{j/y2BI~Mx,ǵ>)*(CzE$x RY $<(x<I%bM{yvlOLF]DŽx i ڔqe?Fe  Pz0YRũ|>2;b#c `d<6L1b{Db; L0AΆ gRsDG}Pm.x#X . 
/@mtd^my^nLYyܼ oC n ΄q()bLg4_%$]+j}-Olbn{*y"zA4Sy0+AN΃sh& $7G'j9ܼBWYʼn ᥜNuow<="xZ3r.:@c_fLj<ڔsѩ^M1gP>2N0cR\x˚mS=,7/IPCB^drA Kf A%>HQ5%E"K(i ݶPm4^C%.xYXK<wMfxx>cfSB<ρ[iK=om c nA^[i &].xܕ}4儭cQSy7TIİ%xK:wC$K = DݿAuф%J 3]Z-z[U|$=ASN_l5agy!;҇`9]><mX|ۨWF!=4xȷy)Rd/yY 3<ePw!Dgw&y^xsvAxSkA877$,;^AWyO^Sj_XQg7d_eL!u3#[sMM#]: Wu=<ᅷ>#$ߍxhFviZb2-o+dAIiA== /=M ق7&8jVQB0D_e:/@>ՓsH=^GG OYܼOT?6Zݓ𤮓xVp3O AA{^ͳFW ^c['V V%W`n xO"InO<o DyrPnNx!;:tٮ4Q|=@[ /X@ C݇_,1&F8z59|TNJ<6O@^ oOS5!7Ox=RUA쁇6tx-bMKNx!7OGxzU όm˲v"k19-{+ubJxw\܆'m1"+Zx/x.=9bx#r(8'Њd޳J'-L$n &#Z C^,}hx:x2UpQ)Pr)Bg_Se#6x%<4ЌZm6iz ^mLG9xP<xFpi^͐^-9brFxFxllx js b:wFxh3ht‬4+(,/t^W>~Iy,>w_zϡH#yf? ]4lkoHOXox:b}\oD(*_ /3Cv~xo`1wZ^xEY^m|d=R۞ P0 wiK!yIo܇ 󪼌nm!^  a򪼌nכQ+DSzeFBdLpy>x쐈Ix?JiFق''.5em ^;»),k\96~^1Jwò SJF{?[Yl><) ^' xc4Xolg HBHj$ a7Ϟt]tvl?C5u΅w[ҍBݼ0iy&Sࡃ6v^Kj#a1D/o`~?>n:]ͶBlQ}$҅>/L^\ O%=m O̱=ڪ dVx`>o`y5qDmZWx?=[ܨ 9LǕ}/^ҧ:lu j3.z19˧(5f Io .; ϤM%o:V«lxjlBX(/moDJ GløtQaPc߯&-[umb]$f7l|[V͞p޳.C= ڏɏ|*yv"U쨕pDdZ> uVwo֣Ն`O/̌nρax8:Gt~̙VbQc ^gU#fCsKÛ$q좃5nOKfz,__yڥAs߃ և^5F;'ë:B\7D..4Yy51\qbt??mO_yw=ra_°ozcq$/ M];t #+on^[!u5\v*.}71l ȿ9ϿaO"h0:VR.Gug^.[z2>QxI6E&ώv}Vk#SDM]^O)&VĖXڕE8{k%J5-~|nK2%/kR`0G QZө]o_cG] @rxiұޫD^:ntQAxP`f~VW/H1tKr|zP0M#̴@/{UWppp#D x,55?Df9 IDAT~趝5&ъIP [qgzee<9|7YNl&8_0r7x>r^`YfV5zx2a]L:Jzk/0rxmd.XA>#ry>rBi7H0{g3)| LN{t2MGqг}Sr1EH| r^ȵNZ[|/?UΡ^*z78ny| 2_ /go ,"WvVBEzߑ#Og\ v<0He GkkB r<(Dž#rW ձOz)!xhMm kx{eB HDLКڴxd7x7h,sYr!,ޱӸ49#;zx# 6-Vٌ֖]^ 'zO|^R˞蓴RxRNǃGjmvn@ɡIIką!SI@ GPJs>SQxçւF%@Mû xhcEr؋#ݕ ndGQ_)GDPJthb9-)ٙ`/IF=QGU>yHԏRW9maGԳuݖ_ f38K\c8.0c9ĵ 5&ЋWj;g‹ty#S{S/Æt΃?%;xE?ӁE(0"*0k C rԥ&lYs HY ͻjY'6\*G! 
CCzӻ^yO.x;m^',Wi `0>}mq_`L1^!ZoqҸ iۈ qmyyݗS"t4Z]?eΖ?r *AO <1& X[2#wuk8t3ޠvZ꬙?q1;sU;gDeyT7Uع/<VYr+*ܙbVB731xZj@y-/wR"|f{re̓p=Tz3&tщI{OQsո#ւJڲp^SfWĂ7 c 3^{3+G c2 1Ob{cS7>͋ 5&KϟY~6>寵Fo}$i.u(7nރWӖ*ڢc$W+#vpo捣2G찊 OoyjxV}v^.sF 9xgқ*5w)O',xQnwFix7Ov8݌@&/Ó[; xmqQt;NmDޘoPnLjg z15)<x /=28p3Uwcʵ[fb`6x]gUK"P?;s&&֣xp7&u#sx;~ȝFCċ Md*AjGf # O谅>j҂yi;xC~[|d82/dkyzyXG7%p?7&0jCmu ?[Y$Nxު3tle/K8ި=s ?"^xl[Ւ֪L|e5E`ʖcI6@rMu >q}\]8>e.}+<x (5?sO>}ҪZΆli%m=6fxGXnTqx܀›نp GԡLXT~lۚzoKbtvmA7?<@Gnw[|xwyXт0X=ڵ$s _yNxhOg/$޸fތg۷<oNIP;ǾYۧS#6lxrj ޴&1N˫`^a:>:I|my2L:YOk;z ^mi>/ Z G|K<8$ 1L:4g\-3jZeıTe9ƐRKFbLSYO?]AoXmZ­2& 1'T%35${O 5~˵V,EONxfmխ7Zf <g`[<}4K^ݛO o]S s~I.8r}[j^֔uA5OjUTaぷ-x]S^z>8w'|Ԓŕk_7?5>:y`{PdKx,Ze;ze͵TMܮKN),|s v›-x*C\}xq|18z[~k[iy J%UڋGoӒU+ X:1Xv F2yIJI>dx/ k`Ysyxn}H*5%Kz98bjG_%<O޸`D_VAM نgf4Co/FUlc?v_|1%`i _spAEH$Z y-kx{AUn.:E ["jxWx~k˛>'tK7:CGo˲W91ȈxsykY9I*6_2]d)mM)m ^7m%xʄW3?#utʑV>0m5^uݴZ<^|!x-=8jiWEeШכ  xA{AU\PL{w{x |xż|rcg2kb|x)Eh;V,uzx~7떚! 2^,9֙ѯ O}w9"i vy<>:Tx֪vkv|x+xxJYf"z2دL˫VxEc2*v G-U&+Ǵj<<܅{o/<]1ZJ[5G50{kxw-2}Ë <#ec6_52MW: {Mon=|mxih&?OP>z5 ճ+$7&j/(_lQcKUgcIMPj_[S߸kSJ onCsQzj+]4zŚ E/${RV~`^F0,Jnx 6[Vܙb 62؛U~î^x^ҕ\QzO~Efpń*jt b߰!pаX3㔒)&b*zӑoZ}.j#bGxK(y+x | &'ШoS|5xl) = ,F<"0n~< ֬$ *GwViX C$͖̯I٫GJ+mø`d٭x{h-r<^Bqh`3ch?>Ox, :D}!,KZ$?zf 5y\,MC6%dW3n^Е~w `E-Qs'6o.B^R ҏjv"x:34pEhV}QQDH? 
+-qENaF-1 7 ԃd0n&9 aawjy/A^E/gO`OJxg~~-x򼚜:L|T5f[Pި=G%WN: <z|Z s<8ZS(3HQQ5h)=oMO .<޾a.<[+d[y LՒojI*=^-0q[ ꜺAN7ϚGf~ɖLk\2u& w'TB˯][;媌<<Žz뗃K7{+xn KۂoнpĨtI$JԦ5PZxJl/o!oQT/u<<&=㝐@*x2W3Bj*.J O=^ \Y x($jy($cF}'PHNt(W= ˳Nn11L 7^'`+"zqBYӕslP:/X8} ֞^o´w7Vwx\~1.{/xuP-6wlvL2~!&TS,i Iw U; (,ccwh=DяQ{BvܠbOќZx܀޵<ȵ0[M<چlRNx*Qfyr`sQYƒҼ2+ x)]k,a4 F/AO`pWTV<)^#1π@"Htx <0!;JP`ݫ<,C*Mg[* Fen')CSSa89=z|QTs}׹y!FWI2" (GsUWʨ )A$i1&Yb4,؋<`.p3)v)l98A*:Kc\bp EkUb7 GKAD(4+' y.׊ #w0tkazShۘk.̵0=9mMPa/-)0<K8T͖.gdll!*RY#_B=!*ٲk":So0[*kdKp*mMyq՛ *lyW @xfe KetڷQy4#<WzkiZkbO' ):_*k+]<)6Qx~>L{3F=mdn3Kj8[ׇWN*Ҝ2:x+GY=ѵ6d27rTAE>4gls6W%;"@z->]+gv ĻZD^uw)٘`k=DixMXU CߺZTVvecRɺ~d06<첛^ o7~*8JC ?Z+o)AY٘xuYuTo!( %_5%Ix5%1FlLT3V}&^1q:g%iiaW!M6&k LlLW1n?6! xwv8w6&Z l sgcLGARv<(;7w)٘ ^ Lo/%=dW~+xcmF ^4r|7x1{:/x<.{xjrZ 'UwFtFMZߝw8/v[Ԙ_s#< L:}OxɅ ';;] <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!C <x )R1cHǐ!?K?IENDB`Dd f/=<  C Ab)x>?j .hwn`)x>?j .PNG  IHDR SgAMAPLTEٟ pHYs.#.#x?v IDATxMșԈ, ֢o0[Z@/zwe\Y\/ \x1F'i,ʻŋ v#A x&`D03OĿPdF2^AEnGVg@<x (3Pg@<x (3Pg@<x (3Pg@<x (3Pg@<x (3Pg@<x (3Pg@<x (3Pg@]2S:bwxLik#wxLik.yqYH ^,)-xdGhBDo}xA#UN$8h 2LH 4+DJ"yx)ypp;ƽ'R*Pb jqeb*|C~\(48XŘHo Ju C'R@[Zj"`(6>i7J kb_(߆IT0m2z6L-1jʠ7P 5TRa (>Qe@)HKh^R)E7H^/x" _vH^okmH*DDRnS@R> UgnS@R xʆ)AJzR$=:[BJB@j!%xa@ 70RǍ6/D<0z] )(D=@p1p]Np(h^K)k>rizAn!X<>;r14=eӰAH/5x% ^ `6M#5x<x)z h聤˅{&ǔB ËeXMK^1=0R7.,O aCH0zt*"v0z5 -ziK0Z]^!WFa^"t$.2 z$F/]Ws z$Дo^z*e nKӚt.0R#zMV]p4=T+Y P*N+b8#8l)RBGt5슄R7LpxEXRǍ^K27DVP*1;4ROpE,UR{3UBR'&8F:ࡔ 2oa@\]}O\l N{aR'd a B.uxv&? 
(uxbGljY]Fz2nY>gBDO؇W0 x" -KXחvc`Y¡.|J2zZG8ػُҁ]rò^:KnHK dyD y80Cc6g!*w,5<=䱕#(iáhh  QII^#eLu 5}84iup:>4MO{|ZNJp(<څ.no'iFy/wǑ<ѫ>'ϣZZĒ唟i{cnjcxZ,Oo^C:O8І6*돨 <=xEDDBg,=xPX^0z%QYqGޛMx<8ՄBN;' h$k} <$o| oQ^C~_ v{]x8%Y-={Uh`R|Ӆ7=#RJ'j FcOKk<#zG4ٙJ^9z~67zp҆7ob mx+V+у1R<_٭%iH҇'md2 4%^M0Z[- WR$=phu񼵠G#RGqx%~FJ_!ȁUn;IJS21x"R{{s8z8@ѫa|#$%á8|Op<o4zz<?xh 5U;8|+#wp< /Q Ӏá3izW/#%GĮON-atl%Ӕ򂇠 Q`33[㱏& 4 |<3^?qxxgfGǥioX =<U?l1sFXP2"`\2 y(+3zFzW?$d /^o#x%w|3n:{gA(d6Fxſ<>kȿ<>ӹؿMxk%" ioZӻ)`2Zgv02f%'e2m$qd眵.!ahhS< %ؕƳ!aܛ*ۙXrZ& 뷿loULcpbmsd^P2]3q.OCbo8?boXS17S17o9a7>|WAx^mc)e^NU:Bp |^>      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`bcdefghijklmnopqrstuvwxyz{|}~._1=\装H0z?%3Qd y56Fā6)y7S<QKtèW_Zw0lްEwOP]2󊄑ـ{7t*G؀'{&mc ^RO.يǫXǍC ?d+*~f&{WPxag .[on-.>.>}'Z]2?\o}exîHx~ex+:X+۷w/7?6~W$[ua^9փR?^E<] _B'-`?J'_?Iw%8V$ӓ8&;"+RG`6S5.=YϬپ}~FOW<J=\%Ô7, ]r7yòCCP͟sEqW0}t=^^MݬѭZ~?.XI鉯b{Skr^ޓf{ Y\.oZqOdvQ刨&Pa;ptOʏ~YCwI$ǽ6ۋ 3îwo8B ^ls颇'Ɔ!jmXrl"Hk9It9d83ՃV{Nˀ6l!cf='idY}~ 9 Jj9n\@r/GRW-jxc)ƫJx͓O\_56uˀ6Эׯۮd~_ZÜ%ޢZi[(F < HjV9UPnO =ͫW7k6cB9Q馿-)o>6 _^X~='&B;_d峻u"RsTK~%Oۖ[(/קR9áW 9*# GK^y4QMq9á6[ozюջY<W[ GRݰkX{Cx9W{lohVRzs!G[l}Jҡ9Cx)m޺Jfu\u'%.#(޲Yde5Rm`NiG[Uzjn2\+eV^^@rzHuJ\TW;ިdQb7zm*=i4LR\n?UM'O[+xNwR]n%Um[to:%+;Vƍz=fe!6(q<Tji?! ]򬒓̚z~_ք] Rc+HH7f(2}S. 
K'=muT@ SkyM%ܘTB'谄yU R@ 8f)SقN=TTx3I!vM'6jap^«ej?9[ "l|B:fc!v XKNlnC85Cx|\:=W}+W[ `Oyͭ}Z- .o:eQJuduS,9M/M'gUղ^5@d})Zx"< <شF%<2} Lq;OgP\bD$ѷ5f&5 >1C<u?3LtF<Aa3V򧻗HjȠ.3e_skeTga_sk`΅gKw?3$ൎf|'$g!+ME[U[dd=<M{Ym_|3#&2`4{x.<ׯC5^f&5OR; i bꞆ׻W%7{Jgz˾s?鵌3s_g:gCp8:g?&դ78 qT>x6-^s$3Xa q /~{b653$,@~-znkV^Ѥ OTĒFv&Tzg%( 0eY)xj^q/uz}6tׯŨmu"QǘW@Y{Ux“Emkx*r<__+o_Rmn¼*(zqtd=asVoVks͛^;70-Y{5 lLt O0kx:<_MMFV/nRSsxCUWK{+^v1yoo'0g߫+Gx EByHs tV$k%lkgFvn%<{ӜF: <чPux)(s O2=[{d|$x<ӤwoiLn['KCYR9k7'>YSUMW]Wkx5?x9]W#ɏF '3xÐl%M8]<>oW?$TmZIRoÏvg1ab[h&Od@P+:=xe |9z<"!l-xcuTLʏ {E\, /jRڞ-獿k]wॴg#I PK#`]wMGMO1ϤV_ro*x0s)>?lyrxV z=NmU?+JiJ%өGXԃGɦBޅKi3#HS+$=̼ HS*W#vI Cj+dj^'7#<w/؉'Y xl5.Q= ٖzsm}|!5n^Óa2 Iߘ^۬`j10-/_1o^Z _ /R]m]'bvqn<(VIO'x]ux2WwU>x ؖ}_|>k7:SxGè3#pKxv?R:|#IKxX K+=t㏋YYH=U׈ -%uo /*GJ̒ u>9.j^&~m<)ĻrhiLOV#$mڼux{!_“[y^m6l2x%ckzxĬ^}7m hXmx~56 ^&_?i~ 6/]miuY;.ڀdUx;Wo:1*<>L[y{-xtkv*k+x㎫whJ^uiTdwjZ ëxՉ^+ ZWJ⻱*նSQڮë]nExSQڮ-8Vg ^ 6f0k˿غeK#x;NEޏ¿t};mx-0Mo޶2%SMmN/;mMKu|<;;KKikslf)j ٱy삭D{]sU/^cxZqKo^cDߗ|B#Gݶ߅-_+Pؖ;NN3UûU^dDںZx6<~HooڱJx xv]hGo2eq5xDjv-S2ޮGހ2n ؎Mywk۱|Cw;{@7O I?^=9\Z8rq F9+yNG՘ExV^u)+^ 63lx?^)[Άx  w7Ti1UxfFqY>GF ^9ɴ|?x$g<7l>ہ]lGx>U<_ Mj>ToQmUIWϟqYՇǵk^~"lkZqCѷg ^^ڹ-H}g/ux/y!Yy~f{\!6d07kx: bW囂g`) / [³5pc^?_l2LGڞ+Oqxfgpt pvӠ@n&d-a}r ReemGgoŁ8t <ٛbz|g+x]³u/IRNgIm܅ ]<^9h 2{Xy$x6>]x\#N {=5=xF0)lmaXjh^ gld mRYۂ %ͻWw G\3oy c Ir;%<\3/$MjzDyr/3C΁g^HXdMsx2TLe=^r ;$2kkv6Z^pNg6\tmOgˏIa.g:I|3ܺ u`ky PeϤw0np<".`lx<Cwf o<'3=tbBg3 bdt۱x37][`eu\ڂB<#Ԥ3wnzg{L)_-٘3?nzg9\x{Cnzg1Qg;n] t\u0=ޱ,#|/W'u g<{cp=1[u[pcZ΁wV.I;V{ ƒ Owt7-;f:yd|Z΁wV9rZNw$9P#fς]t5U.=X8/K|p@. 
:8>2'k4nnvFeؓ82`[>+'dQ`AE3Ug+2W Ce.;&g*nmޑ@]Ƴ#673Ӯ\F@ezEss\P0fV:6`48 ^±;.%<7rͣád7gj ^CE]rV`Tmޡ|Q$D3'=.I*m!KN%͈sr(KNC"͎srIZ7NbF"͎ #$ Jw*slx?mד~Do@KS}iGgʡPe]rbeʁa9e3B5+zmOi K7ܟΠv8FnX:c<rNsS*W{ x: ݤ_Ug VӝTWj]~e^Өbړz5'쀛#3HsH.qQa#s!HJ{x%ïX,OG^T*'͚ڥ|^=Sonu)J}>tbjzy2r)ψ*.q Evb[*ӞtTZ2^=ڈ^+Mkx%/˕]1*OQє^h}M+ETxTlǥWz='[K6UB]OG /Hu+%i2jMw*ϿֶcUrN\ˣ53x  KVK}b{Y]fH(rmkwo6L6WUXjZWL+b2KuZ&hO_RcP/]^Bɶ)c2kxﻭAGl//z`ëL\sZxckڽJ*OzJ xe\-MWQ3ҷjl2դY+QT`87VM#RzN}EPx$683RJr x4[uLwz(PY\Y4Fv;fS^Vdx]qJfv])55v inPUz2+h^7W+:v7̑}U*7}QXOyv` [S+:imOG o]YTV1; ݻ.~Q*F@n5h,V݇ӄ^{UxBn RݟOZK{4"(YjްUX}IvUxüza o+߲7p׏gteyĔGy Vq>ӓMxPf6OOv响֩&#m;R N!ʩXKͩ W@w- e xU ' wN^Mj,9sx1{'[2upg) h'R ) x5IZY@x7b^Iֳ%{\2hd/gK,'Tk'"_W̩y{KCbޅYS/<φΨx1[@ʎK9<T,0, Q . Q;dA]S?bmZ=Ӧ HYƲ]@t:ҩؙSz/GNyT,cVX> ~~_θ?pEn_@: |t*V֞5ΨUZ&lֻI> +0b_SAo>:[(Kb )kt*yTljAv>1x&t*c/R&V*(Vq7@^:y&&1/c*F ƤөF\:xXP=:!^:xؾz8u^7q 00TO y8e^Ku*&繷1#xh`Cg vr^v4r^ S3A6(q)x1dxkk2\ܩD=%+%Q5 uMk|#II2VAp[r^G-ɡdž(rwK(ڼ`[23RcTئax1澒D«pGؚ׹ܨ {: $ov*Ge-ש+3 Sj˴Rx3@&s@0Wᖃ><x~u6)<xllLL28 ejľ:Sx^<#x=` 3=Cx>=tFhdw*GL2@.sQ=h$ )mxu^&`J$Q"MxUVu`K\n&22f./+:ՅakI!. Q]xtALHƒU /&Vy R/xo"`68\ Ճx JXt{!B,~u62“j‹Q԰ 6O^»f0!>8 /GQW :O ^ƏSt*<-x6Yp*(xߋipΒ ӑ'@J|ҀrN%Ӏ'!ҁS>b*B:6|;TtॴN3lb*( x%_lH.憰H^~rb*B腅S\x¥.pjYazTt yb*m)nmld& ?[4LG7͂S7?/ß!" 
C >LGJyO*<ąF/T),8^:,<b*1`|Ep*ԋ-Fp*4kςSR*,Reu!2HyLj;[6o*Rb*+R SYJH "`h*x`QR=TV2n˲_p*kڛ1ϺT[8";K*9aۼU`Fj^gjwfhE78UN>h3X_Jͺޚ",^3Tj7CAS%$: ɤzMp*+R;&8Ucʪ  v 1u)%ʺ Ne]*{"L BX@&xȤ/8">TڇFRʪ"ȴ/,j^p*R)".Tڅb*r)NE]xȥ2'8vhRD{`SِʜT$R *uI/ mH$`dR8%< NEȥpKp*2\ 9/;%!(:/?%!($yH/8HU NEx^zF2Sd<0NIcjT.yOiX7~D':e#7_^M8&;f\*+%@zFY%^xh,fȏWd7wyw eԦtw Ԓo*O+:+~wmv|2}ˢ );$o=0xuN?{ ^Y/+Nw3 Lxx&ab|;<<%Y4y]U)=<'J5.ϰ#^O^ /SIsuҺO߉x{ZSj,@vJD` Sfw1" x Ϛ5$eJHmT,.*"kJg-Tu:MT3xHٓ- IKϢ+XZ71I+_qi KXCc9LeN&?a0H xo/OUV\x0e|0jN׀--cZe$?R$sKa* R_:9k3jd^׭}f Rȓ: Q fADxjx>w17iVYQi?e4?eӂ)#4QX/!;gV d N}aQO6!f.iyi?M ؗBw2185+npeZj?u%6"I?aKC$IA*i6YcK|F>c;^Q X"<SxmBR@_*2V a >>hX>-՟Ș ˴ ^%C[@To =ן]1&I_2'i)ּSxH#V~N<THػlZ }t J | ^^~E$cd% ױO&xps޵Y,BF.E @5ʲgWyÇƜNkY}d5?$ *V6?C3(i*/U"]͊KL]>2^ ίY*q1M{ŲW A?6Tc7ܗ5$dG^›%zc^G{DE9>Z|1Dz$ 8<Ⱦ`/qX1J{fY2& 3/^?*+*6cjZRXeY <0w7$LBƦ9:Ha bw8(0SxUGd7b a+$*Vly(j*`F~М_V㷳iAaA5u֖Vk#<*9:Aa!#5$yFx/ S*ǰg k%J4vU^5oEZ f ^m~ /02td ֗)+-7` 7]|lOtx/k2O4<ФyǾ1z9 |%<`鈛L _o5WСa ^WrxbO9< 1˳l&px),Kxt{x9}0KxhũJ?&a]kM|Hzxɟ^^{x#f`6Q `g4P^%؃s({8}֯{x-Ɠ<(P ^ /^? vk8""ᕬ*HK^\j dd>Cjcۦ]N{We-aDطE WJJf*:h>`x}΋D+(7CN烸hݛٗ5=8ܮW ͥR V/CH*ny}ꤡ.Ń@ #<*oXs@0{1/،48z\Ur]h5 isF(0)d_Daxp(^&c|Q/Qea{xOwDb0F8%oG~z)!VxQ80x˔+*\j~%!ൻ} IDAT=H/)f)SW=^E`E T8]oUqA~+.ϡ..-YS`Ut^ϰ~`K$Zb4{{W 23Β5)! IJ.Ib?5 yND]L ``2>j8b{$߰uaH,1E"Ҹap ?0B% `}UnL~yɲ(!fR$%*K}isq Kb?b%@T1%H2A eIOKfj$GbqN_ПqM`}&Iͺ?;²d \7ad2'@~`Z}BL9S(Nu }\2;?qo`jCN8 P!-Lu½YIK]oYyL)#`BʪS%a7ռʃ|51 "4Px^EO+oja sx+8C{'lQl]q7t֗o_wO'LZ9;x{YK'E y𶧘k|tf{U T!V75 FWU})z^?,G'\g.~{Ov5WX$\GuDةxC"޻馷3>l*u ʲigŰeɐ6+%sr0or1ln׫~~t ۜ/6UR/If>Lv୏A>.:kzy+Su+72-/b$6>Kx{xٖ38wut!/;4o~2˕k Y7]v ^ᴀ{O5ZIk0xAUOj3a.u3x7uUYvL>+-lKx#9a:iju殺G5=żnet+%t'L>o=aY;Vw~J<;a)bT&,Q+%IJ8.n"ٰ"-½uD?>8D ?v Jz1gdX\:WW${SXCդI{{V/v(eHK!/8J^Jaz*Qv1/{8!M1}}пcu pJlAxO҆\`2酚|4 `g'&MXo r^ 9,'4'KX%EjO\{ءe_Xn[Pv(bOŵÅ%xse!K\M?_6 ymRl0XOHUwϞ˸_ Ax]LZBO/aZyQE-79zNK,u:#GW 4Wք0x? 
x-9߃go@`o!ؾ-Y7φ;W^Rgx)Ē:n7y% {Ã9sx%lpx]Q6ի="U73xp;fu+/<.fFTmbY|UGvၓK^3ō0uISx9Wt55$|MXgm2pr9> 5xy9?d r9/iD&*x1MbV}^Y}lTLlR`2PhRQ~ xM^r: LGxL $L2ܨo0hJn`M&^9T6 5f3|7fO~4F?2vAeط%+u^FxL *1tXl+ѷ& L)<x)Pl!g qx%(‹bÅp)Vb5nߌVQ ݳ?xrxh( : u5:ౄ! v /mA8 +xh4^t7]S8Cx%]zD L+_d_b̧ f3x9Ԉ-6(YrxS+E"֩,cCR1nf{ ;sUxO?3?®r$SkOaY 5^ڨ1'9EMŒ ܾ7i0 x2x #_6\ɗ n [j!\;[+LHCP1eyהR~LVUKx V^˵.~ΫUJv˵P"?,43Bkۆxߔg^թC):GV}}o0hg"<xG`j{ys^,rkf:ɍ}$6$gD,U{JRѲUmR#Ґ ?~` <V:˼w,oO<;{5Yw pRFK_c Ιi2ۆ(^?Vtu^2y=" `J%_5I޷Gv7341I8< bN{_W2g -<@̩ۻ{8|1VB'p 'T{'^70 U70TLSrdP85lu[I<$ŝ<Ίdw*bx;Vyq` fM՛Ggg /mUPEd|}0^xqZ՛|F!ߦUC9~9z?nuٶ4uuPm0U9MӤ51柺fÓT2ia^u_2ޫ.PxuU6ǡgwW-ˮxCW-ӚN[qqv"Y‹ l-b o?dD6.W 3^ίY.Y1Ej6IWe>Ç~(KQu\Y$_盪-}EίHrg Y|ub]|S&b&|'O||v3Ai7.iKE8{7_'_9H;PϊULIil(^"+C1?4ŅE0mӝUZwEӮՐvOm,Y+>{sS4}"^\=&M<ŷE"ݬi2έM2QEL|g}ۯ_-Mi0-Ϊ@fy/6_n1ۤQXHmKznwM͋47.컩kHkӼK7n,HfIX̷OS|#jOqS=4tr n뿱o.?-;}.;i{_Nη>㝟aG;-ܺ*s=x wY>>e z. =~'p^$H7O3[|7=#|?O1ޕ{4NHb9i+!nݽ}]F<ִxD/|q&㽻^ZbX{#xF1~l/vעMxuLvA/w2^pw^R=;?>;8Ͻm7Gxo3YwLCf/-iʻ[*N>KsG )Vvv=㴍񎦼b]^I*kd1&߮)M~<>&a9[e8${F3ų]uoMvk-^|n;omr˼wmM?^>{O!vvx[~zLMQn2Z1tpo9~㥙6w[-~xˣ)o˜ӛ)/ox[RqI2$+iBx|gobg\_1pmڼ[Yz12^ /Efmlw ek'b=&9>4J|Vix1rڙ1>kaS^P}z1.xUzV;CRbTͼ"5',c>_Q}Ǵ_[]"->ƚNbڿt/aV+xoZ۶?ƙh2^xyFQO(-Pfe5ߊAMGċ#,E uz(4c.8n|miv;gw!>**AW^,%z{hb1%!ohbxѥ{Żco;?^ Y |)^SW">AeQ< ,7%fQ>ltEӴm|ZؒY&.mV-?bYtلǾ/ë쫊);WWicY-ӮqnES|w,|F,ή"]2i,(M}kKN`(NGxczo^v)8_7i)M[.Nn/gw\Lx YtqWы$'sywO.b۴?p|7mdiѱOhttp://www.amleth.demon.co.uk/DyK $http://www.sohar.com/J1030/appb.htmyK Hhttp://www.sohar.com/J1030/appb.htmDyK $http://www.sohar.com/J1030/appb.htmyK Hhttp://www.sohar.com/J1030/appb.htmDyK $http://www.sohar.com/J1030/appb.htmyK Hhttp://www.sohar.com/J1030/appb.htmDyK $http://www.sohar.com/J1030/appb.htmyK Hhttp://www.sohar.com/J1030/appb.htm$$If!vh5 5<#v #v<:V l0,5 5</ 4$$If!vh5 5<#v #v<:V l05 5</ 4$$If!vh5 5<#v #v<:V l05 5<4$$If!vh5 5<#v #v<:V l05 5<4$$If!vh5 5<#v #v<:V l05 5<4$$If!vh5 5<#v #v<:V l05 5<4$$If!vh5 5<#v #v<:V l05 5<4$$If!vh5 5<#v #v<:V l05 5<4$$If!vh5 5<#v #v<:V l05 5<4$$If!vh5 5<#v #v<:V l05 5<4$$If!vh5 5<#v #v<:V l05 
5<4$$If!vh5 5<#v #v<:V l05 5<4$$If!vh5 5<#v #v<:V l05 5<4$$If!vh5 5<#v #v<:V l05 5<4$$If!vh5 5<#v #v<:V l05 5<4$$If!vh5 5<#v #v<:V l05 5<4$$If!vh555#v#v#v:V l4 ```0555/ 4p$$If!vh5h%#vh%:V l405h%/ / 4f4$$If!vh555#v#v#v:V l40555/ 4f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh5h%#vh%:V l405h%4f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh5h%#vh%:V l405h%4f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh5h%#vh%:V l405h%4f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh555#v#v#v:V 
l405554f4$$If!vh555#v#v#v:V l405554f4$$If!vh55#v#v:V l4055/ /  /  / 4f4$$If!vh55#v#v:V l4055/  / / /  4f4 $$If!vh5 55#v #v#v:V l 05 55/ /  4p$$If!vh5 55#v #v#v:V l05 55/  4$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh55#v#v:V l4055/ /  /  / 4f4$$If!vh55#v#v:V l4055/  / / /  4f4 $$If!vh5 55#v #v#v:V l 05 55/ /  4p$$If!vh5 55#v #v#v:V l05 55/  / 4$$If!vh5 55#v #v#v:V l05 55/ 4$$If!vh5 55#v #v#v:V l05 55/ 4$$If!vh5 55#v #v#v:V l05 55/ 4$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh55#v#v:V l4055/ /  /  / 4f4$$If!vh55#v#v:V l4055/  / / /  4f4 $$If!vh5 55#v #v#v:V l 05 55/ /  4p$$If!vh5 55#v #v#v:V l05 55/  4$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh55#v#v:V l4055/ /  /  / 4f4$$If!vh55#v#v:V l4055/  / / /  4f4 $$If!vh5 55#v #v#v:V l 05 55/ /  4p$$If!vh5 55#v #v#v:V l05 55/  4$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh55#v#v:V l4055/ /  /  / 4f4$$If!vh55#v#v:V l4055/  / / /  4f4 
$$If!vh5 55#v #v#v:V l 05 55/ /  4p$$If!vh5 55#v #v#v:V l05 55/  4$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh55#v#v:V l4055/ /  /  / 4f4$$If!vh55#v#v:V l4055/  / / /  4f4 $$If!vh5 55#v #v#v:V l 05 55/ /  4p$$If!vh5 55#v #v#v:V l05 55/  4$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh55#v#v:V l4055/ /  /  / 4f4$$If!vh55#v#v:V l4055/  / / /  4f4 $$If!vh5 55#v #v#v:V l 05 55/ /  4p$$If!vh5 55#v #v#v:V l05 55/  4$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554$$If!vh5 55#v #v#v:V l05 554Dd$   <  C Ab]=l@Yh$ N+ n]=l@Yh$ NPNG  IHDR$ϝgAMAPLTEٟ pHYs+LIDATx͏UgS;ˇ( پH(EQ1D)4uHqOI)W[ RG Jivܾsmwwޟ=s?#E7S&(W$PjEI^N)#Rz2"2) P,Ȱ0a-ʰ0AJ@%ƥ Y3[(ʟO<Ȁ~)%{.ų`BB*(PV;V(xYZÐj )j )&@ %%bZ ÔZF5 %rT kV 1F%([2{DjT=][yic3b!CA(r -7$K4Haww\W^b(wAe.>(Q Rj (ƵA/v %FAnPLk-ЫFEt;RY% Jn)(1gOqFsVgiE+b̀ϥ7Ke;8d>VHɍǻ MSO@1~gCţ8? 
]`QKX~MSl5āQ 肽(dI(mDq 0uʰsQ܉+컺U̧ 2(~\h8?|ƧxWPrzţaFEW(C=Iܮb1}wPu3YoiRO r P n".6La_\`SX%0[N".(s~b ظt]lv֒Na7'ܣh1 (*jpa ];],J-TEsvω}J@RfA=1b1@uIq}s!!+y?bQTDhPL.9lZ@MdsbЅ(]fU^ra% p^K^^MN{5eRb@]x>wg+4C~rW9h#V6Œv’/K)T9720 )!^)$ϙ*~p;^L >LJjS_Qr@ߢOֆNQ M!)FIf(x\w1)Y {RɒZڧpgP/E:OĤ@K(Rڠf QAY0fJ }~S*PL ]St^STn ()X2314ѦٷYHP]\ 6LEѻPw;\C5ҌQkj$(h8JQ#!"Z Q)jCqD)YOOxJSNZb2AEmsÝ K]Pp')KR(Nˉt?o(wDa֮㚥sN)j}$eRmL "+.EM1$SwP2M-(-uv:rv.E޺DѭSLJqu҇)U3 `mק,Cej5nP:vdl&]]J(Fzu5L\ݪO߉4E 2]ݪ[.mp:;7]P o9eK1Zu|?Hpcs_@(FyiD(x[{dY)-)E;L:"I1E[.Qd8mSfhRSEF)aH`7)E0Bنu?m;fMI4e£I#~p8cKŢt v)bg쎘aVUm JQ*:2)8; ,$jF[6Dz>S-֥E-Pɘd_lJRiu鉺paJPz2ܣB&ҝLg4N5]]|J3B!S7"Q>@{߹ҕh4B&V9f&F^J%|?@"L\PlF[{J{|O3IT:bǖt)eA8`Oij:|9]|ϤjLdi鄥]N璢ȋfLQ@/ R*x}(ik.SjW"5`^Hg[6'ƇЭ:f+Ny%gkNi9%Ǭst֔3wimܧdB>he WEK 0%goYVb!M!&B6:QiF!f.4%z ܄o6Oi4ض[{,)#/ NpB61AQ"nS`F- ªJ/B(Ma.];B@hh/.JCR 5E9:^#EBѮdJRIG9j G[רxAdmFjdRWZP{|D('-?tờjB4sb=I)%w(Ic^häH.VP(%2zR2Q {ۓ ͔ 4bGN9́'QjFI$;>J:k$(lzI₊ZS"<52rShh@u)4KWdc줿xziZfCφ(^:;HkƔn(Ͱ,.Een\0JQX.:*(,XRb\YBa (B/K)j1bm2(?쭢ݱl4JNndQ&*)۫%;lqYHiaTQdͣԌ^pdQڦ~$XLIG[*^aټ6 VRImdP.':9Şk%gׯ#6bbBu,barM}R[ֿkOIj?A(^('x;M-F(,JB(W6Pw])UwTP7y>MqXt<754RWlYyLIi#^1xS[/vGM;j-;r3|FkJqw#]®aGbn)C`(Oo!vjBy@S6zIR 憟Yo9"(6"bSp]De6Ӄ<(G"AD]Ղϑ3EI oA){AA(3D")u,:=LMy!ʽM} u(JUcPJCȡ|D,dHqjUnRQȇ[f^2=ϫYR62GYljHy,M1 mЕE!b!/Kr(}QMg$wLI_nnM{b[:Y+i+(9*NxQ[ Jy3>lA$ksJ1H9YR/Ž/ JHRNi) p()͡`-HSP5/蔚ծw)9@!}RQȔWP26@dAxcTcAu(,bE4)[GzAʮElRIpQ^ןՀ^-Qj.li t/|Jr|uFED7E&qUWո PFu_/rbMIxUr~тN蝮pZL52_IGbߗ &\CoCO4)Hi J9MoE(O^IwY'hރw6( HQh]&Q,6%]fo"=`֋;5b?DF%ETA VҧzPH-;ƶ"cƩQ@wpbҡecɔ[GTPʠr4k0BP d!LRZk 7+$ԥ4߬mm9J7fx@&p)ER%i(5l|g=$#u$re`+uu0RĶ>p6)t%JYQQڡ܇)ůB aJJ" Qd&4AJ)IiR3~'JIyMEX':ȌSfy̧xw)z:AZ(eSܐH;t:)8DIb\tG.%xUr _Sfgb>JiSU̒R/8e5$2``QqJrs"JmS[bP)[Yf@c)/t2?;}E-Q(EK)ibي>(ZqwX = ghSQ|Eg)VhP £芣QJ *&E(ȼ QWM٢t2=IQVTؐч,M bEv(8z=$3\wq(e6⺋C(4ty u:R>Lie$$sŗe {ܸCٿSYCrؽhۮ]޽ =&)AYBc)ކ KS1LiNWR)hP\GSp_E)Q P"(eb |hFRIENDB`1Table|u SummaryInformation( DocumentSummaryInformation8 CompObjj8:;68;=>/1367wy{~%'*,-GIKMN!$')*+,V`tuvz~"$&'acfijCEHJKloqst QSVXYnpruvw  Z\_abEGIKLGILNO579;< 
,/134LOQSTuwz}~469;<tvxz{ 68:<=acegh  479;<=ak02467NPSUVgilop]_befhj" D   =    3demnPQRr sw?ds]^g    {!|!}!!~"($$ %5%&)())a))!++,c,,-- .,/224 68::+<N<)===>>@A CCzDDD~FHK@M$NANwNuOPPASSSpTTVWXYZU\P_a_`a*a&:KJ*֨ϫHv5A^δ#klεXѶķ=m3 E%,/ntu  Bl|$' -@XhWt 5f2$ X ]vRMpBUe}8Z7r !!~#R&+F@F StandaardOJQJ_HmH sH tH <@< Kop 1$x@&5CJ>@> Kop 2$x@& 56CJ<@< Kop 3$xP@&5CJ>@> Kop 4$xP@& 56CJ6@6 Kop 5 <@&CJ@@@ Kop 6 <@&5CJ\aJLA@L Standaardalinea-lettertypeZi@Z Standaardtabel :V 44 la .k. Geen lijst fOfFormBackground 12pt#CJOJQJ_HmHnHsH tH ujOjFormBackground 8 pt(#CJOJQJ_HmHnHsH tH ubObFormBackground 10ptOJQJ_HmHnHsH tH uD@"D Inhopg 1 ! xx5CJ>@!2> Inhopg 2 ! d`CJJ@BJ Inhopg 3 ! d`x^CJ<@< Inhopg 4 ! X^X<@< Inhopg 5 !  ^ 44 Inhopg 6 ^44 Inhopg 7 ^44 Inhopg 8 x^x44 Inhopg 9 @^@FV@F GevolgdeHyperlink >*B* phJB@J Platte tekst $a$ CJOJQJ6U@6 Hyperlink >*B*phf#@f Lijst met figuren ! p^`p5CJOJQJH"@H  Bijschrift x6CJOJQJ@2@@ Lijst 2 88^8B/@B Lijst! P^`^^@"^ Normaal (web)"dd[$\$B*CJOJQJaJphPP@2P Platte tekst 2#0^`0OJQJJQ@BJ Platte tekst 3$5OJQJ\^J0)@Q0 PaginanummerF@bF Koptekst&$ ! CJOJQJH @rH Voettekst'$ ! CJOJQJ6@6  Voetnoottekst(>&@> VoetnootmarkeringH*^R@^ Platte tekst inspringen 2 *^OJQJN b9w&uw]zI y -"$ N <543210 =?@ABCDEFGHIJRQPONMb9w&uw]zI y -"$   rjl ւ 6 Wm > c9 o` y N $3$\$$$$$#%L%u%%%%"#$%&'()*+,-./>?F0}#R|OzN Z =  l ;  d)cYC`4!KYX]@Q j D!!"Z""#p##G$$%%%f&&5'''f((4)))V**%+++R,,-v--]..H//0n007112b22!333J44&556~66<777Y88099:~::T;;-<<<T==*>>>9???M@@AtAA9BBCUCCDkDDEqEE$FvFF'GG HHHoII1JJ KvKK%LLLBMM NqNNYOO$PPPYQQ'RRRYSSTTTlUUHVV0WWWHXX:YYZZZa[[7\\\V]]+^^^l__7``aZaaarbbQcc0dddXee fyff,gg hhhUiij{jj>kklqll%mmm>nnnEoo-ppqqrr sssVtt3uu vrvv?wwwww@xxyyzkzz {|{{{{{{{||d}}A~~bˀ<yYă<nxI׈JXސɓՓcm7L|ؚbv[xɧ&.@HWXlݩީI{|1_ʫ:;S|ɬ)SmnopqruoϸX=d1@Q@Gk~EFOX jkt}67@I'\LGx 78Z Hs)$,e        * V ` u v w x              ' ( 7 8 I J Q R X \ ] g h            - . 
K L ] ~             $ % 2 3 < = M N _        /34XY|O2A]7[jo_anwxy%&'I  U V _ h s t   )!*!+!!0"@"Y"Z"["b"k"t"}"""""""""""""""""""""""""""""""4#&&.&S&T&V&r&s&u&z&{&}&&&&&&&&&+'^'(.())++,,.-]-J.t./*234`588[9o9C:Q::::F;d;;;;<7=O=`>{> ?"???@AAARBB1CCCCCCCCCCCCCC DD(D)D*D+D,D-D?DSDTDUDgDhDzDDDDDDDDDDDDDDDDDDDDDEEE E E+E,E-E.E/EKELEMENEOEPEQERESETEUEVEcExEEEEEEEEEEEEEEEEEEFIFIKN~RRRTVVVVVVVVVVVVVVWWWWWY [o]p]]]]]]]^S^q^^^^^_F_`_____```2`V`W`m```AaaaBbbbcFcGctccccdWddeNeeeeeef5f6fLfffggeggh[hvhhhhhiii[iiiDjjjjjkk#k9k:kPkkklRlllmm,mOmPmqmmmmmmmmn n.nQnRnSnnpqrrrr;smЊҋ,fߙPQgɚʚ˚S ~ќ   fgh{|ݝ6a"#>kl͡Ρѡ%;=?ACDG]_acefǢǣ/02~֤פ٤Enѧ]ʫvx߬ #$'?DIRZ[eĭ֭ͭޭ#+38=>?ABCDEFa0BVWXZvwy{!:;?Clmnѳ9Dľyg]g zVWmnpz{~;./048=>X[^aboqsvw68:<=%/CDEIMRSuxz|} !qsvxy.0356@BEGH  8BVWX\`ef35'$*+++,,L-e---.2.|...L///L00#11 2\223{3344415v555(6Z6e6667 77 8y88x;;0=B=>>%@BAYAD#D?DE$GGGG>IZJKfMNOOOOP?P[PQyRRR UVXZ[]*_`bbb0dFd[dxddddddd ee.eAegik moodppqq4stuvfwx2zQ{{|}~LĀrZgӄ=f~>އ#R݈(8Gd X~ՏTw2xʑn7Г/CyДݔ7SYv&fHӚ'vśHԜئ§f\>\=9}>ڲgj "3׼z_(uFOZt 0?qL.k?r4Mo&jI&OO*A^3a@  * el_w23 ()JKwRz'  "C"Y"""h#u###'$%T&|&&&&&&&&&&&&&&&&&&&&&&&&&&''''''''%''')'*'+'4'5'7'8'9'K'L'M'N'O'e'g'h'i'j'}'''''''''''''''''''''''')h**++,0,-~..../P/Q//7080d00001F2c2244Q4v4J6i66g9:<<<?? @F@y@@@AAACEEvIIKKLLNNzPPPRSUVjVVVWXZx[[]=^__`0`M`f`|``` a+abdddddddddddeJeKeqeeeepggikmmnoopp5pFpRpjp}ppppqq4ttkuuuwxxxxz{{Z|[~]GfHŃ}ÅZЉ"8Mgōߍ4PcY{ϓԔC!͘}˜ETx՟ܡ$Aju Y`˧(~R)Nl\*Pѷ[-̼$b/@]vIDRWz_q 63,4Jd xUWy `&' Se/Ib7\@N1@hj)   h  Q <dpDa>i%D]D. 
!!!!$$$C&0(**+M+j,l,~,,-$-N.b.=01u34P5667789::;<>i@A{BBBCODD6EE=FFFGGG4HqHH&IIIJJJ~K[LL)MMMPPRSSTTIUUVWJX YYDZD[[\\^^^`naaacfg9gIgiiej4llllmmznOpqrrys|uuy7zazozzzzz){\{{{6|~~Cʁ̄ 2iW otђXmΔPȕ7Uhjnqw?&9TcapΡl+ 4Qձڶ˷Pv̺6HXj\ʿIigxwN|!WjHDZ!)9[u5,oS7_/AX.-^z 7Uh u&#4= P K  F  $  <    M7L} ;f,]1m:G:EnocT !"|#$%]'' )$)]*+,,o--v.y/00 22X344456 7u888!9U9o9:#;;;I=>ABB9CC DrDDDExEEEEKFFFF7HHHIoIIJPKKsLLMM[MOQuSTUpVVWWWMYZ\]]^^_w`abscdeeehuiziiikclllmm+n8np'pqsttQuw\yb{n{|}}}~5a#]ӄ8]l`%ɌqWĎPoyђߘhbJқkv֞jßӟE`tʡMaɦڧ7KۮwEmڱ (h t0dY,1qa"N U7M?dVIC41U(|MdEKy/;P~    [t Z3 FU!h+25G! "#+###f$$$$%%&&#'K'T()*r,".q/o01z35A68a:?<[<(>?AxBDDFGfJqK9LyMNRxTjUXVWXYZZ[[S\]!]^7`aee]ffhhjrkkmnToqqtvwz}||v\Ąȅ݅3Iy5Upx =N8ɐߒag+:,"m.v\S}m^-HȽkaD\yc+&GAPEl7gc\rcU {JEJ;   ,z;5qZ/ LxH !")##$$&'(G)m*+,-. 022l33w4556748|88d99H::A;#<<<1=%>l>>?? @@!A:AB>D?DADYD7F,GGIJL+MIMMMMM:NvNNNPSUlWYYZFZZ[\[[]] ___E``>a}a bRc`df`ikFllnpq;ss3t)uuvIx yyzE{|}~~OԂqG̊ߊ(Eӌ}n :j’hә?^#(ȫ<{R˴ʹδoZ:<=d!{.LsH4Ye $FI?1KPdM<[#A!A!,6C)4Ol'      c  Y    M N ^   % a    e        s        z  Z |  [   {   J   ?  ^! ! O" " # 9$ $ % & & `' ~( ) * * * + , , - - f. g. . . / / 0 E0 0 G1 1 f2 2 !3 3 4 4 5 x5 5 .6 6 7 ]8 8 9 9 ~: #; ; ; < ;= = > > > u? ? g@ @ A A B B zC C jD hE E E F F G H I I qJ bK oL M M N bO O P Q R $S S S 6T T 4U LU *V V W wX 8Y Y Z [ 6\ U] ^ ^ ^ ^ ^ _ "_ `_ _ _ ` ` 9a da a a b V އ 4 {  + H U \ l m n o   + \ ͐  / X ; N 1 Y  } ţ ^ y :  " ; ֨ > :  { $ B ޴ ѵ ٶ X  #    6 ̾ 2 r ڿ 4 t } 7 C S W n - K 7 L g h B P Q B P Q a p q : " G H ] W L H    # 2 7 C H \ v T _ p x                             ! " # $ % & ' ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ? @ A B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \ ] ^ _ `                           ! " # $ % & ' ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > e z * A B r t 6 \ z  % ' >  h 9     H ^  , M p      5 n o ~        2 6" U" & ") + . . }0 4 7 b9 c9 n9 9 9 9 9 ; ; ; ; ; ; < < < < < < = = = > > > ? ? ? 
@ @ @ kA lA mA B B B B C C C D D D E E E F F F G G G H H H jI kI lI iJ jJ kJ UK VK WK 4L 5L 6L M M M M 4N 5N 9N PN _N hN wN xN {N N O O O O O O P P P P P P ^Q _Q `Q aQ cQ Q _R `R aR bR dR R R R R R R R SS TS US VS XS S RT ST TT UT WT tT T T T T T T T T T T T U U U U U U U #V $V %V &V )V MV ~V V V V V V KX LX MX NX QX {X X X X X X X Y Y Y Y Y Y Z Z Z Z Z EZ Z Z Z Z Z Z \ \ \ \ \ \ ] ] ] ] ] ] ^ ^ ^ ^ ^ ^^ ^ ^ ^ ^ ^ ^ m_ n_ o_ p_ s_ _ -` .` /` 0` n` o` ` ` ` ` ` ` ` ` a a a a Ib Jb Kb Lb c c c c c c c c c c c c ^d _d `d ad d d d d e e e e f f f f 1g 2g 3g 4g g g g g Qh Rh Sh Th h h h h :i ;i ? o p q r     j k l m : ; < = c d e f                 i j k l     N O P Q 2 3 4 5 } ~               R S T U         i j k l ` a b c                                 C D E F                      L M N O ! ! ! ! ! ! ! ! ! ! V" W" X" Y" " " " " f# g# h# i# # # # # $ $ $ $ !$ "$ N$ O$ X$ m$ n$ $ $ $ $ $ $ $ $ % % % % =% K% L% M% N% n% ~% % % % % % % % % % % % % % & & & & & =& L& M& N& O& t& & & & & & & & & & & & & & & & #' $' -' B' C' g' ' ' ' ' ' ' ' ' ' ' ' A( O( P( Q( R( c( q( r( s( t( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ) () )) *) +) A) Q) R) S) T) }) ) ) ) ) ) ) ) ) ) ) * * * * ** :* ;* <* =* >* e* f* o* * * * * * * * * * + + + + + '+ 5+ 6+ 7+ 8+ M+ [+ \+ ]+ ^+ x+ + + + + + + + + + + + + + + + , , , , , (, ), *, +, E, S, T, U, V, y, , , , , , , , , , , , , , , )- 8- 9- :- ;- g- - - - - - - - - - - - - . . . D. ]. ^. h. q. . . . . . . . . . . . . . . . . . 
/ / / / / 2/ @/ A/ B/ C/ [/ i/ j/ k/ l/ / / / / / / / / / / 0 0 0 0 0 ,0 :0 ;0 <0 =0 R0 `0 a0 b0 c0 v0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 +1 ,1 -1 .1 /1 W1 X1 a1 v1 w1 1 1 1 1 1 1 1 1 2 2 2 2 2 ,2 -2 .2 /2 M2 [2 \2 ]2 ^2 v2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 3 3 3 3 3 )3 73 83 93 :3 P3 ^3 _3 `3 a3 3 3 3 3 3 3 3 3 3 3 m4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 :5 ;5 <5 =5 [5 \5 ]5 ^5 |5 }5 ~5 5 5 5 5 5 5 5 5 5 5 5 5 5 6 6 6 6 76 86 96 :6 [6 \6 ]6 ^6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 7 7 7 7 ;7 <7 =7 >7 a7 b7 c7 d7 7 7 7 7 7 7 7 7 7 7 7 8 8 8 8 A8 B8 `8 a8 b8 c8 p8 q8 r8 s8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 9 9 9 9 /9 09 19 29 J9 K9 L9 M9 b9 c9 d9 e9 y9 z9 {9 |9 }9 ~9 : : : : : : : : : : : : : : : ; ; ; "; #; $; %; .; /; 0; 1; O; P; X; ; ; ; ; ; ; < < < i< j< r< < < < = = = = = += ,= .= C= D= F= V= W= n= o= = = = = ? ? @ @ A A A B B B B B C D /F .G yH zH H H H H ]I ^I I I RJ SJ J J DK SK TK UK K K ?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstvwxyz{|}~0G 0G 0G 0G 0G 0G 0G 0G 0G 0G 0G 0G 0G 0G 0G (0(0 0 0 0( 0 0 0 0 00r000000000000000000000(0 0 0 0 0 0 0r000000(00000000 0 0 0 0 0 0 0 0 0 0 00 0000000000000 000000000 0000000000000 0000000 000000000 0000000000000 0000000000 00000 00000 0 0 0 0 0 0 0 0 0 0 0 0 0800A0A 0A 0A0A0A 0A 0A 0A 0A0A8000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 000 0 0 0 0 00 0 0 0 0 0 0 0 0 0 0 0000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0800+' 0+' 0+' 0+' 0+' 0+' 0+' 0 +' 0+' 0 +' 0+'( 0 0J. 0J. 0J. 0J. 0J. 0J. 0 J. 0J. 0 J. 0J. 0 J. 0J.@ 0J.@ 0J.@ 0J.@ 0J.@ 0J.@ 0J.A 0J. 0J. 0J. 0J. 0J. 0J. 0J. 0J. 0J. 0J. 
0J.(00@ 0@ 0@ 0@ 0@0@0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@0@0@ 0@0@0@ 0@ 0@0@ 0@ 0@0@ 0@ 0@0@ 0@ 0@ 0@ 0@0@ 0@ 0@ 0@ 0@0@ 0@ 0@ 0@ 0@0@ 0@ 0@ 0@ 0@0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@ 0@( 0 0E 0r 0I 0I 0I 0I0 0R 0R 0R 0R 0R 0R 0R 0R 0R 0R 0R 0R 0R 0R 0R0R 0R 0R0R0R 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W 0W0R0Sn0Sn0Sn0Sn 0Sn 0Sn 0Sn 0Sn 0Sn 0Sn(0Sn0s0s 0s 0s 0s 0s 0s 0s 0s 0s 0s 0s 0s0s0s0s(> 0Sn0>0>80>0ҋ0ҋ0ҋ0ҋ 0ҋ 0ҋ 0ҋ 0ҋ 0ҋ 0ҋ0ҋ0ҋ0ҋ 0ҋ 0ҋ 0ҋ 0ҋ 0ҋ 0ҋ 0ҋ 0ҋ 0ҋ 0ҋ 0ҋ 0ҋ 0ҋ0ҋ 0ҋ 0ҋ 0ҋ 0ҋ 0ҋ 0ҋ 0ҋ 0ҋ 0ҋ0ҋ 0ҋ 0ҋ 0ҋ 0ҋ 0ҋ 0ҋ 0ҋ 0ҋ0ҋ80>060606060606 06 06 06 06 06 06 06 06 0606 06 06 06 06 06 0606 06 06 06 06 06 0606 06 06 06 06 06 0606 06 06 06 06 06 06060606 06 06 06 06 06 06 06 06 06 06 06 06 06 06 0606060680>000000 0 0 0 0 0 0 0 0 00 0 0 0 0 0 00 0 0 0 0 0 00 0 0 0 0 0 00 0 0 0 0 0 000000080> 0F 0F 0F 0F 0F 0F 0F 0F 0F 0F 0F 0F 0F 0F 0F 0F 0F 0F 0F 0F 0F 0F 0F 0F 0F 0F 0F 0F0F80> 0ѳ 0ѳ 0ѳ 0ѳ 0ѳ 0ѳH0ѳ0ľ0ľH0ѳ000H0ѳ000H0ѳ 0 00 0 0 0 0 0 0 0 0 0 0 0 0 0 00R0000000 0 0 0 0 0 0 0 0 0 0 0 0 0 000000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 00 0 0 0 0 0 0 0 00 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0000j0j0j0j0j0 0 0 0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0 0j0R0R 0R 0R 0R0R0R0R0R 0R 0R0R0R(0R0000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0800}!0}!0}!0}!(0R0 %0 %0 %0 % 0 % 0 % 0 % 0 % 0 % 0 % 0 %0 %0 %80 %0-0-0-0-0-0- 0- 0- 0- 0- 0- 0-0-0- 0- 0-0-0-0-0-80 %0D0D0D0D0D0D80 %0AN0AN 0AN 0AN 0AN 0AN 0AN 0AN 0AN 0AN 0AN 0AN 0AN 0AN 0AN 0AN 0AN 0AN 0AN 0 AN 0AN0AN(0R0d0d0d 0d 0dS 0d0dS 0d0dS 0d0dS 0d0dS 0d0dS 0d0d0d0d0d0d80d0u0u0uB 0uB 0uB 0u0u0u0u0u0u80d000000000(0R000800w0w0w0w800+0+0+0+0+K 0+K 0+K 0+K 0+K 0+K 0+K 0+K 0+K 0+K 0 +0+L 0+L 0+L 0+800&0&0&0&(0R000000000000 0 0 0 0 0 0 0 0 0 0 0 0 0 0(0R000j0(00=0= 0!= 0= 0"= 0= 0#= 0= 0= 0$= 0= 0%= 0= 0= 0&= 0'= 0(= 0)= 0*= 0+= 0,= 0-= 0.= 0= 0= 0=80= 0 0 0 080=0 0 0 0 0 0 0 0/ 00 01 0280=000( 0 0  0  0  0  0  0  0 8 0  0 0 0 0 0 0 0 03 04 0500080 000R 0R 0R 0R 0R 0R 00P 0P 0P 0P 0P 0P 00Q 0Q 0Q 0Q 0Q 0Q 0Q 0Q 0 0( 0 08 08 08( 00707(00!0!0!0!0!0!(00+ 0j 0, 06, 0, 0, 0, 07, 0, 0, 0, 0, 0, 08, 0, 0, 0, 0, 09, 0, 0, 0:, 0, 0, 0;, 0, 0<, 0, 0, 0=, 0, 0, 0, 0>, 0, 0?, 0, 0@, 0, 0, 0, 0j 0y8( 0y8 0x; 0Ax; 0x; 0Bx;0x;0x; 0Cx; 0x; 0x;(0y8 0#D 0#D 0#D 0#D(0y80G0G0G0G0G0GT 0GT 0GT 0GT 0GT 0GT 0G0G0G0G( 0y8 0R 0R 0R 0R 0R 0R 0R 0R 0R 0R 0j0b0bM 0bM 0FdbM 0FdbM 0bM 0dbM 0dbM 0dbM 0dbM 0dbM 0dbM 0dbM 0db 0b 0b 0b 0b 0b( 0b0o0oG 0oG 0oG 0oG 0oG 0oG 0oG 0oG 0oG 0oG 0 oG 0 oG 0 oG 0 oG 0 oG 0oG 0oG 0oG 0oG 0oG 0oG 0oG 0oG 0o0o0oH 0oH 0oH 0oH 0oH 0oH 0oH 0oH 0oH 0oH 0 oH 0 oH 0 oH 0 oH 0 oH 0oH 0oH 0oH 0oH 0oH 0oH 0o0o0o0o0o0oI 
0oI 0oI 0oI 0oI 0oI 0oI 0oI 0oI 0oI 0 oI 0 oI 0 oI 0 oI 0 oI 0oI 0oI 0oI 0oI 0oI 0oI 0oI 0oI 0oI 0oI 0oI 0oI 0oI 0oI 0o0o0o0o0oJ 0oJ 0oJ 0oJ 0oJ 0oJ 0oJ 0oJ 0oJ 0oJ 0 oJ 0 oJ 0 oJ 0 oJ 0 oJ 0oJ 0o(0b 0Ԝ0Ԝ0Ԝ0Ԝ0Ԝ0Ԝ 0ԜO 0Ԝ0ԜO 0Ԝ0Ԝ(0b00000(0b0>0>O 0>O 0>O 0>0>0>O 0}>O 0}>O 0}>O 0}>O 0}>O 0}>(0b0 O 0 O 0 O 0 O 0 O 0 O 0 O 0 O 0 O 0 ( 0b00000000000( 0b 0 0!0!0!0!0!0!0 0 0( 0b 0 0 0 0j 0 0: 0: 0: 0: 0 0; 0; 0; 0; 0; 0; 0 0j 0 0 0D 0E 0F 0G000000000 0 0000000000 0 0j 0j 0j 0Hj 0Ij 0Jj 0Kj 0Lj 0Mj( 0j 0O 0O 0O 0O 0O 0O 0O 0O 0O 0O8 0O 0^ 0^ 0^ 0^ 0^( 0j 0 0 0 0 0 0 0N 0O 0PY 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0Q 0R 0S 0 0 08 0 0 0 09 09 09 09 09 09 09 09 09 0 00 0 0 0 ,0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0( 0j 0h* 0Th* 0Uh* 0Vh* 0h*0h*0h* 0h* 0h* 0h* 0h* 0h* 0h* 0h* 0h* 0h* 0h* 0h* 0h*80h*000080h*0c20c280h*0Q40Q4(0j0i60i60i60i6(0j0<0< 0W< 0X< 0Y< 0Z< 0[< 0\< 0<Z 0<0<0<Z 0<0<Z 0<0<Z 0<0<Z 0<0<Z 0<0<0<( 0j0P0P0P0P0P0P0P(0j 0V 0V 0V 0 0x[ 0x[ 0x[ 0x[ 0]x[ 0^x[ 0_x[ 0`x[ 0ax[ 0bx[ 0cx[ 0dx[(0x[0 a0 a0 a0 a0 a0 a0 a 0 a 0 a 0 a 0 a 0 a 0 a 0 a 0 a 0 a 0 a 0 a 0 a0 a(0x[0pg0pg0pg0pg(0x[0m0mU 0mU 0mU 0mU 0mU 0mU 0mU 0mU 0mU 0mU 0 m0m80m0q0q0q0q80m0u0u0u(0x[80x0x0x80x0{0{0{0{ 0{ 0{(0x[0f(0x[0(0x[0Ń0Ń(0x[000000000[ 0[ 0[ 0[ 0[ 0[ 0[ 0[ 0[ 0[ 0 [ 0 [ 0 [ 0  0( 0 0Y 0Y 0Y 0Y( 0 0Ԕ 0Ԕ 0Ԕ 0Ԕ( 0 0 0 0 0 0e 0f 0g 0h 0i 0j 0k 0( 0 0՟ 0l՟ 0m՟ 0n՟ 0o՟ 0p՟ 0q՟ 0r՟ 0s՟ 0t՟ 0u՟ 0v՟ 0w՟ 0x՟ 0y՟ 0z՟0՟0՟(000(00˧!0˧!0˧!0˧!0˧!0˧!0˧!0˧!0˧!0˧(00N0N0N(00000(0000(0002 02 02 02 02 02 02 02 00 0 0 0 0 080 0] 0] 0]^ 0]^ 0]^ 0]^ 0]^ 0]0]0]800W0W0W( 0 0 0 0 0 0 0 0(00 0 0 0 0 (000000(000(0000 0 0 0 0 0 0 0 0 0 0 0(00000S0S0S\ 0S\ 0S\ 0S\ 0S\ 0S\ 0S\ 0S\ 0S\ 0S\ 0 S\ 0 S0S0S0S0S(0S00000(0S000(0S0000(0S(0S0@0@0@(0S0 0 0 (0S0Q0Q] 0Q] 0Q] 0Q] 0Q] 0Q0Q0Q0Q(0S0a0a0a(0S0000(0S0D0D 0D 0D 0D 0D 0D0D(0S0!0!(0S0$0$0$0$0$(0S0+0+00l, 0l, 0l,0l,(0l,0N.0N.0N.0N.0N.0N.0N.(0l,06060606(0l,0:0:0:0:0:0:80: 0B 
0B 0B 0B 0B 0B 0B80:0F 0F 0F 0F0F 0F 0F 0F 0F 0F80:0J 0J 0J 0J 0J 0J80:0M0M 0M 0M 0M 0M80:0T 0T 0T 0T 0T 0T 0T 0T 0T 0T 0 T(0l,0\0\(0l,0^0^0^(0l,0a0a0a0a(0l,09g00i0i0i0i000l 0l 0l 0l 0l0ld 0ld 0ld 0ld 0ld 0ld 0l0l0l0l0l0l0l0l0l0l0ld 0ld 0l0l0l 0 0 0 0 0 000l 0  0  0{  0|  0} 0l008 08 08 08 08 08 08 08 08 08 0  00l0U 0 U 0 U 0 U 0 U 0U 0U 0U 0U 0U 0U 0(00 0 0_ 000_ 00_ 00_ 00_ 00_ 00_ 00_ 00_ 00_ 0 00_ 0 0_ 0 0_ 0 00_ 0 0000` 0` 00(00v0v0v0vh 0vh 0vh 0vh 0v0v{ 0v{ 0v{ 0v{ 0v{ 0v0v0v(00g0g0g0g0g0g` 0g0g` 0g0g0g` 0g0g!0g!0g0g 0~g0g 0g0g 0g0g 0gc 0gc 0gc 0gc 0gc 0g 0g0g 0g 0g 0g 0g 0g 0g 0g 0g 0g 0g 0g 0g 0g 0g 0g 0g 0g 0g 0g 0g 0gb 0gb 0gb 0gb 0gb 0gb 0gb 0gb 0gb 0gb 0 gb 0 g 0g 0ge 0ge 0ge 0ge 0ge 0ge 0ge 0ge 0ge 0ge 0 g 0g(0 0 0 0o 00o 00o 00o 0o 0o 0o 00o 00o 00o 0 0 0< 0< 0< 0< 0< 0< 0< 0< 0< 0< 0 < 0 < 0 < 0 < 0 < 0< 0< 0 0 0 0 0 0< 0$ < 0$ < 0$ < 0$ < 0$ < 0$ < 0$ < 0$ < 0$ < 0 $ < 0 $ < 0 $ < 0 $ < 0 $ < 0$  0(00000l 0l 0l 0l 0l 0l 0l 0l 0l 0l 0 l 0 l 0 l 0 l 0 l 0000l 0l 0l 00p 0p 0p 0(0000000000000(00 )0 )0 )0 )| 0 )| 0 )| 0 )| 0 )| 0 )| 0 )| 0 )| 0 )| 0 )0 )0 )} 0 )} 0 )} 0 )} 0 )} 0 )} 0 )} 0 )} 0 )} 0 )0 )~ 0 )~ 0 )~ 0 )(00;0;"0;"0;0;f 0;f 0;f 0;f 0;f 0;f 0;f 0;f 0;f 0;f 0 ;f 0 ;f 0 ;f 0 ;f 0 ;f 0;f 0;f 0;0;q 0;r 0;t 0;t 0;v 0;w 0;x 0;y 0;z 0;(00MM0MM0MM0MM0MM0MM0MM0MM0MM(0 0W 0W 0W 0W(00]0] 0] 0] 0] 0] 0] 0]0l 0d 0d 0d 0d 0d 0d 0d 0d 0d 0d 0d 0d 0d 0d 0d 0d 0d 0d 0d 0d 0d 0d 0d 0d 0d 0d 0d 0d 0d0l 0} 0}0l0 0 0 0 00000 0 0 0 000 0 0 0 00 0` 0` 0` 0` 0` 0` 0` 0`0000 0 0 0 0 0 00 0 0 0 00l(0v0 0 0 0 0 0 0 0 0 0 0  0 0000 0 0 0 0 0 0 0 0 0 0  0  0  0  0 (0v000 0 0 0 0ۮ 0ۮ 0ۮ 0ۮ 0ۮ 0ۮ 0ۮ 0ۮ800008000080000 0 0 0 0 0 0 00 0 0 0000l00 0 0 0 000(00707070707(00000 0 0 0(000000(000000(00| 0| 0| 0| 0| 0| 0| 0| 0|0|0|z 0|z 0|z 0|z 0|z 0|z 0|z 0|(0 0y 0y 0y 0y 0y 0y 0y 0y 0y 0 y 0 y0l(0000 0 0 0 0 0(00 0 0 0 (00 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0D 0 0D 0 0(00#'n 0#'n 0#'n 0#'n 0#'n 0#'n 0#'n 0#'n 0#'n 0#'n 0 #'n 0 #'n 0 #'n 0 
#'0l0?<0?<0?<0?< 0?< 0?< 0?< 0?<0?<0?<0?<0?<0?<0?< 0?< 0?< 0?< 0?<      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrtuvwxyz{|}~ 0?< 0?< 0?< 0?<0?<0?<0?<0?<0l0]0]0]0]0]0](0]0]f0]f0]f(0]0rk0rk0rk0rk(0]0q0q0q0q0q(0]0}|0}|0}|0}|0}|(0]0Ą 0Ą 0Ą 0Ą 0Ą 0Ą0Ą0Ą 0Ą 0Ą 0Ą 0Ą 0Ą 0Ą 0Ą 0Ą0Ą0Ą0Ą 0Ą 0Ą 0Ą 0Ą 0Ą0l00000(00g0g 0g 0g 0g 0g 0g(00Z 0Z 0Z 0Z 0 Z 0 Z 0 Z 0 Z 0 Z 0Z 00 0 0 0 0 00l00 0 0 0 0 0 0000(0000000 0 0 0 0 00(00\0\0\ 0\ 0\ 0\ 0\0\0\0\(000800 0 0 0 0 000008000000 0 0 00000800 0 0 000 0 0 0 0 0 0000l00j 0j 0j 0j 0j 0j 0j 00 0 0 0 0 0 0 0 0 0 0 0  0  0  0 0 0 0 0 0 00k 0k 0k 0k 0k 0k 0k 0k 0k 0k 0 k 0 0k 0 0k 0 0k 0 0k 0 0k 0 0k 0 0k 0 0k 0 0k 0 0k 0 0k 0 0k 0 0k 0 0k 0 0k 0 0k 0 0k 0 0k 0 0k 0 0k 0 0k 0 0k 0 0k 0 0k 0 0k 0 0k 0 0k 0 00l0!A0!A0!A0!A00AD0AD0AD0AD0AD0AD 0AD 0AD 0AD 0AD0AD 0AD 0AD 0AD0AD0N0N0N0N0N0N 0N 0N 0N 0N 0N 0N0N0N 0N 0N 0N 0N 0N 0N 0N 0N 0N0N 0N 0N 0N 0N(0N0Fl0Fl0Fl 0Fl 0Fl 0Fl 0Fl 0Fl 0Fl 0Fl 0Fl 0Fl 0 Fl 0 Fl 0 Fl 0 Fl 0 Fl 0Fl 0Fl 0Fl 0Fl 0Fl 0Fl 0Fl 0Fl 0Fl 0Fl 0Fl 0Fl0Fl0Fl 0Fl 0Fl 0Fl0Fl 0Fl 0Fl 0Fl 0Fl 0Fl 0Fl 0Fl(0N0:0:80:0h0h0h0h0h0h80:000080:00080:0000000000(0N000000(0N 0= 0= 0= 0= 0= 0= 0=0=80=0L 0L 0L 0L0L0L0L0L 0L 0L 0L 0L 0L 0L 0L 0L0AD0$0$0$(0$01 01 01 01 01 01 01 01018010080100801008010!80100 0 0 0 0 0 0 0 0 0 0  0  0 80100801008010 0 (0$0 0 0 (0$0 0 0 0 00 0 0 0 0 0 0 0 0 0 ! 0 ! 0 ! 0 ! 0 0 0 0 " 0 " 0 $ 0 $ 0 $ 0 0 0 0 % 0 % 0 % 0 % 0 % 0 % 0 % 0 % 0 % 0 % 0  % 0  % 0  % 0  % 0  % 0 % 0 % 0 % 0 % 0 % 0 % 0 % 0 % 0 % 0 % 0 % 0 % 0 % 0 % 0 % 0 % 0 % 0 % 0  % 0! % 0" % 0# % 0$ % 0% % 0& % 0' 0 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 ' 0 ' 0 ' 0 ' 0 ' 0 ' 0 ' 0 ' 0 ' 0 ' 0  ' 0  ' 0  ' 0  ' 0  ' 0 ' 0 ' 0 ' 0 ' 0 ' 0 ' 0 ' 0 ' 0 ' 0 ' 0 ' 0 ' 0 ' 0 ' 0 ' 0 ' 0 ' 0 ' 0  ' 0! 
' 0" ' 0# ' 0$ ' 0% ' 0& ' 0' 0 0 0 0 0 0 0 0 0 0 0 0 0  0  0  0  0  0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0  0  0 0 0 0 0 0 0 0 0 0 0 0 0 0  0  0  0  0  0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 000 80 0 0 0 0 0 0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 #0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 006 006 06 06 06 06 0; 0; 0; 06 09? 09? 09? 09? 09? 09? 09? 09? 09? 09? 09? 
06 0S 0S 0S 0S 0S 0S 0S 0S 0S 0S 0S 0S 0S 0S 0S 0S 0S 06 0j 0j 0j 0j 0j 0j 0j 0j 0j 0j 0j 0j 0j 0j 0j 0j 0j 0j 0j 005m 05m 05m 05m 0p 0p 0p 0p 0p 0p 0p 05m 0#z 0#z 0#z 0#z 0#z 0#z 0#z 0#z 0#z 0#z 05m 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 05m 0M 0M 0M 0M 0M 0M 0M 0M 0M 0M 0M 0M (0M 0 0 0 0 0 (0M 0~ 0~ 0~ 0~ 0~ 0~ 0~ 0~ 0~ 0~ 0 ~ 0 ~ 0 ~ 0 ~ 0 ~ 0~ 0~ 0~ 0~ 0~ 0~ (0M 0 0 0 0 0 80 0Y 0Y 0Y 0Y 80 0 0 0 0 0 0 0 0 0 0 0 0 80 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 80 0a 0a 0a 0a (0M 0 0 0 0 0 0 0 0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  (0M 0O 0O 0O 0O 0O 0O 0O 0O (0M 0& 0& 0& 0& 0& 0& 0& 0& (0M 0 0 0 0 (0M 0 0 0 0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0 0 0 0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0 0 0 0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0 0 0 0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0 000 0C 0C 0C 0C 0C 0C 0C 0C 0C 0C 0C 0C 0C 0 0 0 0 0 0X 0X 0X 0X 0 0  0  0 0 0 0 0 0 0 0 0 0& 0 0 4 0  4 0  0  4 0  4 0  0  4 0  4 0  0  4 0  0  0  0 0 0 0 0 0 0 0 0" 0" 0" 0" 0 0|. 0|. 0|. 0|. 
000A9 0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9  0L9 0L9 0L9 0L9  0L9  0L9 0L9  0L9  0L9  0L9  0L9 0L9  0L9  0L9  0L9  0L9  $0L9 0L9  0L9  0L9  0L9  0L9  $0L9 0L9  0L9  0L9  0L9  0L9  0L9 0L9  0L9  0L9  0L9  0L9  0L9 0L9  0L9  0L9  0L9  0L9  0L9 0L9  0L9  0L9  0L9  0L9  0L9 0L9  0L9  0L9  0L9  0L9  0L9 0L9  0L9  0L9  0L9  0L9  0L9 0L9  0L9  0L9  0L9  0L9  0L9 0L9  0L9  0L9  0L9  0L9  0L9 0L9  0L9  0L9  0L9  0L9  0L9 0L9  0L9  0L9  0L9  0L9  0L9 0L9  0L9  0L9  0L9  0L9  0L9 0L9  0L9  0L9  0L9  0L9  $0L9 0L9  0L9  0L9  0L9  0L9  0L9 0L9  0L9  0L9  0L9  0L9  0L9 0L9  0L9  0L9  0L9  0L9  0L9 0L9  0L9  0L9  0L9  0L9  0L9 0L9  0L9  0L9  0L9  0L9  0L9 0L9  0L9  0L9  0L9  0L9  0L9 0L9  0L9  0L9  0L9  0L9  0L9 0L9  0L9  0L9  0L9  0L9  0L9 0L9  0L9  0L9  0L9  0L9 0L9 0A9 0M` 0M`  0M` 0M`  0M` 0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M`  0M` 0A9 0A9 
0y 0y 0y 0y 0y 0y 0y 0y 0y 0y 0y 0y 0y 0y 0y 0y 0y 0A9 0 0 0 0 0 0 0 0 0 0 0 0  0  0  0 0 0 0 0 0 0 0 0 0 0  0  0  0  0  0 0 0 0 0 0A9 0 0 0 0 0 0 0 0 0 0A9 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0! 0" 0A9 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0A9 0 0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0 "0! "0" "0# "0$ "0% "0& "0' "0( "0) "0* "0+ "0, "0- "0. "0/ "00 "01 "02 "03 "04 "05 "06 "07 "08 "09 "0: "0; "0< "0= "0> "0? 0 0A9 0 0 0 0 0 0 0 0 0A9 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 00000 0 0 0 0 0 0 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 0000 0 0 0 0 0 0 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 0000 0 0 0 0 0 0 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 0000 0 0 0 0 0 0 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 0000 0 0 0 0 0 0 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 00 0 0 0 000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 00@(0t@&0@&0z0z0z0z0@'0@'0z0z0z0z0@'0@'0z0@'0@'0z0@&0z0z0z0z0 |ڞ0:000:000:000:000:000:0&00:00:0#0#0:000:000:0000:0"0:0"0:0"0:0& 00:0+0:0-&00000000:06&0:08&0:0:&0:0<&0:0+0:0" 0 0 000:0/0:00:010@:0&0:0 0:0 0:0 0:0 0:0 0@:00h%WXݩީ{|:;mnEF jk67u v wx%&U V s t )!*!Y"Z"""""""""""S&T&r&s&z&{&&&&&&&CCCCCC(D)D,D-DSDTDDDDDDDDDDDDDDDEE E E,E-E.E/ELEMEOEPERESEUEVEEEEEEEEEEE]]``FcGceehhkkOmPmmmQnRnrr;sVWvw:;lmmnz{./=>abvw<=CDRS|} !xy56GH VWef:;=>67~,-MN)*tu&'ijJKst XYuvabKLNO;< 34ST}~;<z{<=gh;<67UVopef demnPQ  {!|!kl23()JK&&&&&&&&&&''''*'+'8'9'N'O'i'j'''''''''''''''..P/Q/708000ddddJeKeeeg h P Q P Q p q         & ' - . 4 5 ; < B C I J P Q W X ^ _        & ' - . 4 5 ; <     n o   9 9 ; ; ; ; < < < < = = > > ? ? @ @ lA mA =B >B B B C C D D E E F F G G H H kI lI jJ kJ VK WK 5L 6L M M wN xN O O P P `Q aQ aR bR R R US VS TT UT T T T T U U %V &V V V MX NX X X Y Y Z Z Z Z \ \ ] ] ^ ^ ^ ^ o_ p_ /` 0` ` ` a a Kb Lb c c c c c c `d ad d d e e f f 3g 4g g g Sh Th h h ? q r   l m < = e f         k l   P Q 4 5         T U     k l b c                 E F           N O ! ! ! ! ! ! X" Y" " " h# i# # # $ $ m$ n$ $ $ $ $ % % M% N% % % % % % % & & N& O& & & & & & & B' C' ' ' ' ' ' ' Q( R( s( t( ( ( ( ( ( ( *) +) S) T) ) ) ) ) * * <* =* * * * * * * + + 7+ 8+ ]+ ^+ + + + + + + , , *, +, U, V, , , , , , , :- ;- - - - - . . ]. ^. . . . . . . . . 
/ / B/ C/ k/ l/ / / / / 0 0 <0 =0 b0 c0 0 0 0 0 0 0 -1 .1 v1 w1 1 1 1 1 2 2 .2 /2 ]2 ^2 2 2 2 2 2 2 3 3 93 :3 `3 a3 3 3 3 3 4 4 4 4 4 4 4 4 5 5 <5 =5 ]5 ^5 ~5 5 5 5 5 5 5 5 6 6 96 :6 ]6 ^6 6 6 6 6 6 6 6 6 7 7 =7 >7 c7 d7 7 7 7 7 8 8 A8 B8 b8 c8 r8 s8 8 8 8 8 8 8 8 8 8 8 8 8 9 9 19 29 L9 M9 d9 e9 {9 |9 : N :0lht&:0Hlht&:0&lht&:0P&lht&:0zlht&:0 zlht&:0 L&lht&:0&lht&:0<lht&:0tlht&:0lht&:0֘lht&:0֘lht&:0(טlht&:0`טlht&:0|lht&:0 |lht&:0"}lht&:0$<}lht&:0&*lht&:0(*lht&:0*+lht&:0,P+lht&:0.+lht&:00+lht&:02Htlht&:04tlht&:06tlht&:08tlht&:0:(ulht&:0<`ulht&:0>ulht&:0@klht&:0Bklht&:0Dllht&:0FLllht&:0Hllht&:0Jllht&:0Lllht&:0N,mlht&:0lht& :0Rm:0T:0V:0X :0ZD:0\|:0^:0`:0b$:0d\:0f:0h:0jx:0l:0n:0p :0rX:0t:0v:0x˜:0z8˜:0|p˜:0~˜:0˜:0Ø:0PØ:0:0؞:0:0H:0:0:0:0(:0`:0:0Р:0:0@:0x:0:0衘:0 :0d̘:0̘:0̘:0 ͘:0D͘:0|͘:0͘:0͘:0$Θ:0\Θ:0Θ:0Θ:0Ϙ:0<Ϙ:0tϘ:0Ϙ:0Ϙ:0И:0TИ:0И:0И:0:04ј:0:0 :0D:0|:0:0:0$:0\:0:0:0:0<:0t:0:0:0:0T:0:0:0:04 :0l :0 :0:0!(03:0L!(03:0 !(03:0 !(03:0#(03:0<#(03:0t#(03:0#(03:0#(03:0$(03:0T$(03:0((03:0$:0 $:0"4%:0$l%:0&%:0(%:0*&:0,L&:0.&:00&:02&:04,':06d':08':0:':0< (:0>D(:0@|(:0B(:0D(:0F$):0H\):0J):0L):0N*:0P<*:0Rt*:0T,:0V8,:0Xp,:0Z,:0\,:0^-:0`P-:0b-:0d-:0f-:0h0.:0jh.:0l.:0n.:0p/:0rH/:0t/:0v/:0x/:0z(0:0|`0:0~0:00:01:0@1:0x1:01:01:0 2:0X2:02:02:03:083:0p3:03:03:04:0P4:04:04:04:06:07:0H7:07:07:07:0(8:0`8:08:08:09:0@9:0x9:09:09:0 ::0X::0::0::0;:08;:0p;:0;:0;:0<:0P<:0<:0<:0<:00=:0h=:0=:0=:0>:0H>:0>:0>:0>:0(?:0`?:0?:0?:0@:0@@:0x@:0@:0@:0 A:0XA:0 A:0 A:0B:08B:0D:0D:0D:0(E:0`E:0E:0E:0 F:0"@F:0$xF:0&F:0(F:0* G:0,XG:0.G:00G:02H:048H:06pH:08H:0:H:0<I:0>PI:0@I:0BI:0DI:0F0J:0HhJ:0JJ:0LJ:0NK:0PHK:0RK:0TK:0VK:0X(L:0Z`L:0\L:0^L:0`M:0b@M:0dxM:0fM:0hM:0j N:0lXN:0nN:0pN:0rO:0t8O:0vpO:0xO:0zO:0|P:0~PP:0P:0P:0P:00Q:0hQ:0Q:0Q:0R:0HR:0R:0R:0U:0U:0U:0,V:0dV:0V:0V:0 W:0DW:0|W:0W:0W:0$X:0\X:0X:0X:0Y:0 k:0@Xk:0Bk:0Dk:0Fl:0H8l:0Jpl:0Ll:0Nl:0Pm:0RPm:0Tm:0Vm:0Xm:0Z0n:0\hn:0^n:0`n:0bo:0dHo:0fo:0ho:0jo:0l(p:0n`p:0pp:0rp:0tq:0v@q:0xxq:0zq:0|q:0~ r:0Xr:0r:0r:0s:08s:0ps:0s:0s:0t:0Pt:0t:0      
!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnoprstuvwxyz{|}~t:0t:00u:0hu:0u:0u:0v:0Hv:0v:0v:0v:0(w:0`w:0w:0w:0x:0@x:0xx:0x:0x:0 y:0Xy:0y:0y:0z:08z:0pz:0z:0z:0{:0P{:0{:0{:0{:00|:0h|:0|:0|:0}:0H}:0}:0}:0}:0(~:0`~:0~:0~:0:0@:0x:0:0:0 :0X:0:0Ȁ:0:08:0 x:0 :0:0 :0X:0:0Ȇ:0:08:0p:0:0 :0":0$P:0&:0(:0*:0,0:0.h:00:02؉:04:06H:08:0::0<:0>(:0@`:0B:0DЋ:0F:0H@:0Jx:0L:0N:0P :0RX:0T:0Vȍ:0X:0Z8:0\p:0^:0`:0b:0dP:0f:0h:0j:0l0:0nh:0p:0rؐ:0t:0vH:0x:0z:0|:0~(:0`:0:0В:0:0@:0x:0:0:0 :0X:0:0Ȕ:0:08:0p:0:0:0:0P:0:0:0:00:0h:0:0ؗ:0:0H:0:0:0:0(:0`:0:0Й:0:0@:0x:0:0:0 :0X:0:0ț:0:08:0p:0:0:0:0P:0:0\>0 ?0(EEEEEPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPmmmmmm) ^  ~ n8y?!HYj! y!"##~$`%&&'(()S**+g,3--./h0S1223q455678V9::;i<E=&>??x@OA>BBCDsE,FFGlH2IIJtK5LLMCNNOPqQ1R+SSTaU VWWXxYFZZ[\]g^1__`a|b>ccdvekfdg/hhiNj-k(llmUn$oopqsrsstwu6vvwxyz{N|+}~~?5 ق_`;ljx[_XEu)3EMXXchAjklomnoqq|sOuߡ[n8oo+R)G w a  -LtHbj)DeA)!+14<TXgm܆PT9J2J]f5r//7B*+9@nd-f(@U N U x[ e Ay } ` n ,| 2 C 7[ r h %   p$ . 2 6 : u? rD eE lI W W ,/012456789:;<=>?@BCDEFGHIJKLMNOQRSTUVWXYZ[\]^`abcdefghijklnopqrstuvwxyz|}~ *-/3478:<>ABEGO\fjvy~  #%&+035;=?BDFJN_cfgmosvxz}~ R l  % < R k  d!p+5@KT_iqtcX{:m@E j6H*RY7w%&%&&'U(s())Y*********.r.z....]57EKKL,LhLLLM.MOMUMMMM^eghjFkmmnppDr:sOuuQvz;{{ʢ~ ݥͩ%C]e/~֬#Z+BVv:lWz.=av<CR| x5G Ve:=6~,M)t&iJsXuaKN;    3 S }     ; z     < g     ;       6Uoe=dmP? %&''({)1Fawdμu-8!26>Ojy~xخA (!J""&.....//*/8/N/i/////2P7788>T0hllJmm5xŕP4 p2F~Sfy{X9i9/K}: 1UAN_ctaњ +@L^ D hE E F sG eH I J @K L L M N O XP P wR ?S >T *U V qW KX zY Z 4[ 5\ \ )] (^ d^ ^ _ _ T` !b zb kc c d f zg g ph Ci j j uk l l m m 4n n {o ]p q q 'r r s s t t Tu u Nv v w w x x &y y rz @{ { | | j} } D~ ~ ~  O : ΃ ? D  J ? `       R G    B  # O /     ^       E   @    9     ?  $  S  h  ( ?! 6" v" " # # r$ f% % & ' ' ' ( ( o) "* l* a+ + + ,, , <- - - A. z. . . 
!/ T/ / / / "0 X0 0 0 1 T1 1 1 %2 G2 t2 2 2 2 '3 a3 3 3 4 X4 4 4 4 5 15 \5 5 5 5 5 )6 ]6 6 6 7 X7 7 7 18 f8 8 8 8 8 9 ?9 q9 9 9 : 6: Z: : : ; J; ; ; ; < 1< Z< < < < = 4= m= = Z> > > > > ? 1? R? w? ? ? ? @ 1@ U@ x@ @ @ @ A 7A \A A A B 6B FB XB pB B B B B B C C 8C OC D D F K U W -3AP_m{     !"#$%&'()+,.012569;=?@CDFHIJKLMNPQRSTUVWXYZ[]^_`abcdeghiklmnopqrstuwxz{|}    !"$'()*,-./1246789:<>@ACEGHIKLMOPQRSTUVWXYZ[\]^`abdehijklnpqrtuwy{|                          ! " # $ % & ' ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ? @ A B C D E F G H I J K L M N O P Q S T U V W X Y Z [ \ ] ^ _ ` a b c d e f g h i j k m n o p q r s t u v w x y z { | } ~                           ! " # $ & ' ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; = > ? @ A B C D E F G H I J K L M N O P Q S T U V W X Y Z [ \ ] ^ _ ` a b c d e f g h i j l m n o p q r s t u v w x y z { | } ~  W .*-.0P[wz{} !#C]y|}0LOPRr 1Zvyz|-ILMOo9Xtwxz,HKLNn   # 8 T W X Z z  7 : ; = ] ~  " J f i j l  5 8 9 ; [     7 m B^abd#&')IgA]`ac7SVWYy!=@ACc 3f>Z]^`.124T!Al)EHIKk"7SVWYy>^z}~6RUVXx<`|;WZ[]}:=>@`x/KNOQq  +Hdghj    ; _ { ~  "!>!A!B!D!d!!!!!!!!!!""""8"T"W"X"Z"z""""""""####7#N#j#m#n#p########%$A$D$E$G$g$$$$$$$$ %%%%4%k%%%%%%%%%%%&C&_&c&d&f&&&&&&&&'.'2'3'5'U't'''''''''''(C(_(c(d(f(((((((()-)1)2)4)T)x)))))))))))*3*O*S*T*V*v*******++"+#+%+E+i+++++++++++,/,K,O,P,R,r,,,,,,,, ----4-S-o-s-t-v-------.:.V.Z.[.].}.......%/A/E/F/H/h////////0 0 00/0K0g0k0l0n00000000101415171W1y11111111112 2?2[2_2`2b222222222333!3A3c333333333334'4C4G4H4J4j444444455#5$5&5F5j55555556666'6[6w6{6|6~6666666775797:7<7\7w77777777777868R8V8W8Y8y8888888 9)9-9.909P99999999::::?:[:w:{:|:~::::::::1;M;Q;R;T;t;;;;;;; <&<*<+<-<M<`<|<<<<<<<<<<<1=M=Q=R=T=t=======>#>'>(>*>J>g>>>>>>>>>>>>?2?6?7?9?Y?l??????????? 
@*@F@J@K@M@m@@@@@@@@ AAAA3AQAmAqArAtAAAAAAAAB2B6B7B9BYBBBBBBBBBCCC#C2CNCRCSCUCuCCCCCCCCDDDD6DHDdDhDiDkDDDDDDDDDEEEE8ENEjEnEoEqEEEEEEEEFF!F"F$FDFSFoFsFtFvFFFFFFFFG G$G%G'GGG}GGGGGGGHH H H+HdHHHHHHHHHHHILIhIlImIoIIIIIIIIJ*J.J/J1JQJzJJJJJJJK K K K,KSKoKsKtKvKKKKKKKKLL"L#L%LEL^LzL~LLLLLLLLLLM;M?M@MBMbMMMMMMMMNN N N+NNNjNnNoNqNNNNNNNO6OROVOWOYOyOOOOOOOPP!P"P$PDPePPPPPPPPPPPQ6QRQVQWQYQyQQQQQQQR R$R%R'RGRuRRRRRRRRRRRS6SRSVSWSYSySSSSSSSSTTTT:TsTTTTTTTTTTTUIUeUiUjUlUUUUUUUU%VAVEVFVHVhVVVVVVV W)W-W.W0WPWjWWWWWWWWWWW X%XAXEXFXHXhXXXXXXXY3Y7Y8Y:YZYYYYYYYYZZZZ7ZaZ}ZZZZZZZZZZ[>[Z[^[_[a[[[[[[[[\0\4\5\7\W\z\\\\\\\\\\\]3]O]S]T]V]v]]]]]]]^$^(^)^+^K^l^^^^^^^^^^^_I_e_i_j_l________`0`4`5`7`W`~```````aaaa(a7aSaWaXaZazaaaaaaaaaaaabObkbobpbrbbbbbbbc.cJcNcOcQcqccccccc d)d-d.d0dPdddddddddddde5eQeUeVeXexeeeeeeeef f f f,fVfrfvfwfyffffffff g%g)g*g,gLgggggggghhh h@hah}hhhhhhhhhhi2iNiRiSiUiuiiiiiiii jjjj3jXjtjxjyj{jjjjjjjjk7k;kk^kkkkkkkk llll2lNljlnlolqllllllllmm"m#m%mEm\mxm|m}mmmmmmmmmn7n;nn^n~nnnnnnnnnnno"o>oBoCoEoeooooooo p&p*p+p-pMpnpppppppqqqq;qwqqqqqqq rrrr2rjrrrrrrrss s s+sfsssssssssss t3tOtStTtVtvtttttttu,u0u1u3uSu~uuuuuuuvvv v*vOvkvovpvrvvvvvvvvw8wx@x`xxxxxxxxyyyy6ykyyyyyyyyyyz!zHzdzhzizkzzzzzzzzz{{{ {){Y{u{y{z{|{{{{{{{{||2|d||||||||||| }B}^}a}b}d}}}}}}}}~;~>~?~A~a~~~~~~~~<b~@\_`bŀȀɀˀ69:<\ȁ4Wsvwyт7SVWYyƒă69:<\Ɠ2^hY{.8bvʣ Ʀ3:Ytx!<l !!!P%w%%]///`5z55667==">BB/CUDeDhDxDJJ-K@M[MMMN NNNNOOPWPPPQ?QfQQR%R&SNSjSSSTWXMX^^__7_D_q````a?aEamaaaaaa)b@bBbibbbbbc2cDcccd d6dUd]ddddde e6eLeNepeeLfsfffffffggHgcgeggggggh-hYh iEiYiaiiiiiii jBjDjjjjjjjPk}kkkkkkkll@lPlRlzlllllll mnnn=eٟ#- -W8btɵж'Q +T^ PV}z@Up_~Bm 5Vh%Vm}4CpY's'Po,=g~>N{l1@k4Do-=eu#4\~EU-S_p)AQ~ /jz;O`9lDXh+CRtEU3BuG[($R$$,-)-#DPDwDDDDHPuPP(HŖ>iXÞΞ]f%cĠߠ-8y&\;J#4?7;!j!!j<<<=>I>$GNGG]]]o p+pѢIز ?eʳChϴ q"Oms$*OhIYB~3\v)*\|###/(b(()))667 9:9b9U;p;;VVVZZZZ[9[@\i\w\X___ecccdddeDeHeqeeeiijjHjjjvkkkpq9q~%Kx1VP} "C|7Vac    8 G  V-x--:::[LLLLM'MaMMMtUUUPVVVWiWWZZB[p\\\}]]]Iggg[hhh i1i3iaiiij4jbjHftDo^q((()))*++R+,,,'.J.^.v///u0001 
223335555?5U5H6l66666888m:::;UAdͩZoƪ'Z~ի9iŽ/>s'Pf@KoK{KLLLTU8U2kXkkOv3UcPw֗ - V- - D "E fE uF F F MG G G ,H bH H I QI I J ?J oJ J "K `K K %L mL L L M ~M M M ON N N VP P P 0Q mQ Q R dR R T U 2U W 9X uX {Z Z Z [ \ 4\ \ ] S] ] ] ^ Ll l l l m >m mo o o o p Ep p p p p +q eq q q r 'r ar r r r r "s Ys s s s s s )t Mt t t t t 9u hu u u u v Mv uv v v w 'w aw w w x Gx cx x x &y Zy y y y y Qz z z z { 5{ X{ { { { | O| z| | | } J} |} } } } ~ D~ p~ ~ ~ ~ ~ G    5 U Ҁ E {  6 ^ w ӂ | ;| F| " 7 9 ɾ I /I 9I ~ ~ ~ ~ ! @ 2 e  8 \ u ̰ N  X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%̕ X%X%X%X%X%X%X%X%X%X%̕ X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%̕XXXXXXXXXXXXXXXXXXXXXXXXXXXXG$G$XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXtXXXtXtXXtXXtXXtXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXtXtXXXXtXtXXXXXtXXtXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX::XXXXXXXX (/2ELPW[mtx!!!!!!xRS,b$[Oƭ 2@5RZ(     :,WG 3f33ԔԔ?Hazard Severity DefinitionsArial Rounded MT Bold" F   N,WG 3f33ԔԔ?Likelihood of Occurrence DefinitionsArial Rounded MT Bold" Mt ) %*  S" T2  # ) B T2  # %x"B  T2  # 9y#pB  T2  #  \ H%B  `B  c $Dp\ B`B  c $D}B`B  c $DTB`B  c $DpAB`B  c $D! ,#B`B  c $DM$BTB  C D9x"U$BZ  3  $A*B  h  S  Ԕ `"$  n   c $ D["< Xt (U,! 
/ S"T" 0 # 0!((|0 T" 1 # 1#!0 T" 2 # 2D0 T" 3 # 3L!0 T" 4 # 4 (|0 T" 5 # 5 0 ZB 6 S D ` 0ZB 7 S D|L0ZB 8 S DmL0ZB 9 S D0E0ZB : S Da !0`B ; c $D&|(0Z < 3 <&U,!0 h = S >"! h ? S >" h @ S ̙>" b A C AԔ" h B S B̙jJ" b C C C" h D C D#"  b E C E" \ F 3 " h G S Ԕ"  h H S >" h I S Ԕ"  h J S >" JB K # 8c?"JB L # 8c?" M <!M?" ! N < N?"   O <O?"  P <P?"  Q <Q?"  R <R?3" B S  ? /01234)_T [a?jUrB,G˴: N L$%$(tK $tP S tR /(6jQ W tO tN  tMN t/1:'6tID $ TQ) kt, #tAntG( 5%T=L%F T j5%t?/5%_ TJ$THf5%tQ-$ t@FK$T@$ TAV$l%TBT?@TCT^TD $.LEK#$PT| _Hlt510937842 _Hlt510938304 _Hlt510940662 _Hlt510938176 _Hlt510937662 _Hlt510937663 _Hlt510516705 _Hlt509733983 _Hlt509736744 _Hlt509733989 _Toc354286883 _Toc418475030 _Hlt509732289 _Toc510937329 _Toc353613393 _Toc353613721 _Toc353614021 _Toc353949772 _Toc353952929 _Toc354286884 _Toc418475031 _Toc510937330 _Hlt510938431 _Toc353613394 _Toc353613722 _Toc353614022 _Toc353949773 _Toc353952930 _Toc354286885 _Toc418475032 _Toc510937331 _Hlt510938434 _Hlt510938503 _Hlt510938763 _Hlt510938681 _Hlt510938447 _Hlt510938661 _Hlt509906728 _Hlt510938751 _Hlt510256452 _Toc353613395 _Toc353613723 _Toc353614023 _Toc353949774 _Toc353952931 _Toc354286886 _Toc418475033 _Toc510937332 _Hlt509733932 _Toc510937333 _Hlt509732265 _Hlt510939552 _Toc510937334 _Hlt510939557 _Hlt510939668 _Hlt510939764 _Hlt510939679 _Hlt510939609 _Hlt510939682 _Hlt510939759 _Hlt510939769 _Hlt510939774 _Hlt510939778 _Hlt510939783 _Hlt510939796 _Hlt510939799 _Hlt510939809 _Hlt510939812 _Hlt510939867 _Hlt510939815 _Hlt510939870 _Hlt510939874 _Hlt510939878 _Hlt509734001 _Hlt510939608_2._SYSTEM_SAFETY _Toc510937335 _Hlt510939667_2._SOFTWARE_SAFETY _Toc353613396 _Toc353613724 _Toc353614024 _Ref353694553 _Toc353949775 _Toc353952932 _Toc354286887 _Toc418475034 _Toc510937336 _Hlt509734009 _Hlt509732250 _Hlt510939887 _Toc418475042_2.2_What_Makes _Toc510937337 _Toc510937338 _Toc418475044 _Toc510937339 _Toc418475046 _Toc510937340 _Hlt510939897 _Ref326056984 _Ref326057004 _Toc342648307 _Toc353755557 
_Toc354279311 _Toc355062088 _Toc501774994 _Toc501775475 _Toc501775723 _Toc501780325table21 _Toc509733956 _Hlt509734010 _Hlt510939889 _Hlt501775143 _Toc510937341 _Toc418475045 _Toc510937342 _Toc510937343 _Toc509733461figure21 _Toc509733908 _Hlt509734049 _Hlt509734086 _Hlt509734105 _Hlt510939977 _Hlt510938166 _Toc353613404 _Toc353613732 _Toc353614032 _Toc353949783 _Toc353952938 _Toc354286892 _Ref417876714 _Ref417876716 _Ref417876718 _Toc418475040_2.1_Safety_Requirements _Toc510937344 _Hlt510939908 _Hlt509732237 _Toc353613397 _Toc353613725 _Toc353614025 _Toc353949776 _Toc353952933 _Toc354286888 _Ref417808490 _Ref417808495 _Ref417874335 _Ref417874339 _Ref417874345 _Toc418475035_2.3_Preliminary_Hazard_2.4_Preliminary_Hazard _Toc510937345 _Hlt509716390 _Hlt510939974 _Hlt510939990 _Hlt509732231 _Hlt509732197 _Toc353613398 _Toc353613726 _Toc353614026 _Toc353949777 _Toc353952934 _Toc354286889 _Ref417808523 _Ref417808526 _Ref417876640 _Ref417876646 _Ref417876678 _Toc418475036_2.3.1_PHA_Approach _Toc510937346 _Hlt510939998 _Hlt509732183 _Hlt509732188 _Hlt509734026 _Toc501774995 _Toc501775476 _Toc501775724 _Toc501780326table22 _Toc509733957 _Hlt509734028 _Hlt510939999 _Hlt510943191 _Toc353613399 _Toc353613727 _Toc353614027 _Toc353949778 _Ref417788503 _Ref417788511_2.3.1.1_Identifying_Hazards _Toc510937347 _Toc353613400 _Toc353613728 _Toc353614028 _Ref353771273 _Ref353771306 _Ref353771334 _Toc353949779 _Ref417790106_2.3.1.2_Risk_Levels_2.4.1.2_Risk_Levels _Toc510937348 _Ref326057532 _Ref325844823 _Ref325844853 _Toc342648304 _Toc353755556 _Hlt510940016 _Hlt510940661 _Hlt510940664 _Hlt501776640 _Hlt509734034 _Ref418475840 _Toc501774996 _Toc501775477 _Toc501775725 _Toc501780327table23 _Toc509733958 _Hlt509734036 _Hlt509734240 _Hlt509734312 _Hlt510940077 _Hlt510940482 _Hlt510940019 _Hlt510940030 _Hlt509732170table24 _Toc509733959 _Toc510937349 _Toc353613401 _Toc353613729 _Toc353614029 _Toc353949780 _Toc353952935 _Toc354286890 _Toc418475037 _Toc510937350 _Hlt509907343 _Hlt510940037 
_Hlt509716993 _Hlt509732164 _Hlt509734084 _Hlt510940042 _Hlt509734044 _Hlt509734094 _Hlt509125324 _Hlt509125325 _Hlt509734103 _Hlt510940048 _Ref325330469 _Toc325744900 _Toc325850377 _Toc326050642 _Toc342648518 _Toc353756859 _Ref417788295 _Ref417788412 _Ref417788466 _Toc501774997 _Toc501775599 _Toc501780261 _Toc510937351_2.4.3_Tools_and_1 _Hlt509732209 _Hlt509717102 _Toc509733462 _Toc509733463figure22 _Hlt509734096 _Hlt510940043 _Hlt510940050 _Toc353613402 _Toc353613730 _Toc353614030 _Toc353949781 _Toc353952936 _Toc354286891 _Toc418475038_2.4.3_Tools_and _Toc418475039 _Toc510937352 _Toc353613406 _Toc353613734 _Toc353614034 _Toc353949785 _Toc353952940 _Toc354286894 _Toc354289081 _Toc355073996 _Toc418475047_2.5_Software_Subsystem _Toc510937353 _Hlt509732151 _Hlt510940061 _Hlt509224043 _Hlt509224044 _Hlt510940076 _Hlt509734238 _Hlt510940080 _Hlt510940089 _Hlt510940108 _Hlt510940092 _Hlt510940110 _Hlt510940112 _Hlt510940127 _Hlt510940115 _Toc353613417 _Toc353613745 _Toc353614045 _Ref353679625 _Toc353949798 _Toc353952953 _Toc354286906 _Ref417790611 _Ref417790618 _Ref417790620 _Ref417799306 _Ref417799309 _Ref417799311 _Ref417873424 _Ref417873426 _Ref417873501 _Toc418475060 _Hlt509718097_3._SOFTWARE_SAFETY_3._SOFTWARE_SAFETY_1 _Hlt510940180 _Hlt510939751 _Toc510937354_3._SOFTWARE_SAFETY_2 _Hlt509288714 _Hlt510940188 _Hlt509732137 _Hlt510940192 _Hlt509732132 _Toc509733464figure31 _Toc509733909 _Toc353613408 _Toc353613736 _Toc353614036 _Toc353949787 _Toc353952942 _Toc354286896 _Toc418475049 _Toc510937355 _Hlt510940198 _Hlt509732126 _Hlt509734248 _Ref353689052 _Toc353755558 _Toc501774998 _Toc501775478 _Toc501775726 _Toc501780328table31 _Toc509733960 _Hlt509734003 _Hlt509734250 _Hlt509734419 _Hlt510940199 _Hlt510940711 _Hlt510940204 _Hlt510940207 _Hlt510940212 _Hlt510940255 _Hlt510940259 _Hlt510940262 _Hlt510940265 _Hlt510940268 _Hlt510940284 _Hlt510940288 _Hlt510940294 _Hlt510940304 _Hlt510940307 _Hlt510940310 _Hlt510940313 _Hlt510940317 _Hlt510940320 _Hlt510940324 
_Hlt510940327 _Hlt510940330 _Hlt510940333 _Hlt510940336 _Hlt510940339 _Hlt510940342 _Hlt510940345 _Hlt508609748 _Hlt508609749 _Hlt510940351 _Hlt510940381 _Hlt510940378 _Hlt510940354 _Hlt510940384 _Hlt510940387 _Hlt510940391 _Hlt510940394 _Hlt510940397 _Hlt510940448 _Hlt510940400 _Hlt510940423 _Hlt510940450 _Hlt510940453 _Hlt510940456 _Hlt510940459 _Toc353613409 _Toc353613737 _Toc353614037 _Toc353949788 _Toc353952943 _Toc354286897 _Ref417802595 _Ref417802598 _Ref417802599 _Ref417890006 _Ref417890008 _Ref417890010 _Toc418475050 _Toc353613411 _Toc353613739 _Toc353614039 _Ref353694171 _Toc353949792 _Toc353952947 _Toc354286901 _Toc354289088 _Ref417791070 _Ref417791074 _Ref417791075 _Ref417791145 _Toc418475054 _3.2_Scope_of _Toc510937356 _Hlt510940466 _Toc510937357 _Hlt510938291 _Toc510937358 _Hlt509288716 _Toc510937359 _Hlt510940475 _Hlt509723175 _Hlt509723181 _Toc342648310 _Toc353755560 _Ref417791369 _Ref417888213 _Ref417888221 _Toc501774999 _Toc501775479 _Toc501775727 _Toc501780329 _Toc509733961table32 _Toc510937360 _Hlt509732083 _Hlt510940479 _Hlt509734310 _Toc501775000 _Toc501775480 _Toc501775728 _Toc501780330 _Toc509733962table33 _Hlt509734397 _Hlt510940700 _Toc501775001 _Toc501775481 _Toc501775729 _Toc501780331 _Hlt510940487 _Hlt509734340 _Toc509733963table34 _Hlt510940488 _Hlt509732076 _Toc509733465 _Toc509733910figure32 _Toc353949790 _Toc353952945 _Toc354286899 _Toc418475052 _Toc510937361 _Hlt510940666 _Hlt510940695 _Hlt509734360 _Hlt510940493 _Hlt509734382 _Hlt510940641 _Hlt510940657 _Hlt509732058 _Hlt510940699 _Hlt509734395 _Hlt510940702 _Hlt509732066 _Ref326057863 _Ref326057878 _Toc342648311 _Toc353755561 _Toc501775002 _Toc501775482 _Toc501775730 _Toc501780332 _Toc509733964 _Hlt509734384 _Hlt509734442 _Hlt510940644 _Hlt510940639table35 _Toc510937362 _Toc501775003 _Toc501775483 _Toc501775731 _Toc501780333 _Toc509733965table36 _Hlt510516707 _Ref509393694_3.2.3.3_Tailoring_the _Toc510937363 _Hlt510940710 _Hlt509734417 _Hlt509732046 _Hlt510940723 _Hlt510940713 
_Hlt509731987 _Hlt510940726 _Hlt509731938 _Hlt509731992 _Toc353613410 _Toc353613738 _Toc353614038 _Toc353949789 _Toc353952944 _Toc354286898 _Toc418475051_3.2.3.3.1_ Full _Software _Toc510937364 _Toc310070589 _Toc314543161 _Toc314562863 _Toc315072592 _Toc315587129 _Toc315587850 _Toc315685052 _Toc315688316 _Toc315847089 _Toc315847590 _Toc315848692 _Toc315849095 _Toc315858913 _Toc315861705 _Toc316189711 _Toc316280361 _Toc316450398 _Toc318707205 _Toc318872396 _Toc318880999 _Toc320692768 _Toc320693536 _Toc321291732 _Toc321294942 _Toc322364290 _Toc322364632 _Toc322364912 _Toc322728764 _Toc322980373 _Toc323506573 _Toc323677599 _Toc325167064 _Toc325219792 _Toc325743966 _Toc325849737 _Toc326050231 _Ref326057680 _Toc326122770 _Toc334261367 _Toc342648004 _Toc353613413 _Toc353613741 _Toc353614041 _Toc353949794 _Toc353952949 _Toc354286903 _Toc418475057 _Toc510937365 _Toc310070590 _Toc314543162 _Toc314562864 _Toc315072593 _Toc315587130 _Toc315587851 _Toc315685053 _Toc315688317 _Toc315847090 _Toc315847591 _Toc315848693 _Toc315849096 _Toc315858914 _Toc315861706 _Toc316189712 _Toc316280362 _Toc316450399 _Toc318707206 _Toc318872397 _Toc318881000 _Toc320692769 _Toc320693537 _Toc321291733 _Toc321294943 _Toc322364291 _Toc322364633 _Toc322364913 _Toc322728765 _Toc322980374 _Toc323506574 _Toc323677600 _Toc325167065 _Toc325219793 _Toc325743967 _Toc325849738 _Toc326050232 _Toc326122771 _Ref333219259 _Toc334261368 _Toc342648005 _Toc353613414 _Toc353613742 _Toc353614042 _Toc353949795 _Toc353952950 _Toc354286904 _Toc418475058_3.2.3.3.3_ Minimum _Software _Toc510937366 _Hlt510940734 _Hlt510940737 _Toc510937367_3.2.3.4_Tailoring_for_3.3_Incorporating_Software _Toc510937368 _Hlt510940753 _Hlt510940744 _Hlt510940787 _Hlt510940755 _Hlt510940764 _Hlt510940878 _Hlt509734528 _Hlt510940881 _Hlt509734434 _Hlt510940884 _Hlt509734532 _Hlt510940887 _Hlt509731919 _Hlt509734440 _Hlt509734538 _Hlt510940894 _Hlt510940897 _Hlt509734541 _Hlt510940899 _Ref326057785 _Ref326057817 _Ref326118793 _Ref326118830 
_Ref326118852 _Toc342648312 _Toc353755562 _Ref417799683 _Toc501775004 _Toc501775484 _Toc501775732 _Toc501780334 _Toc509733966table37 _Hlt509734530 _Hlt509734540 _Hlt510940883 _Hlt510940895 _Hlt510940908 _Hlt510940911 _Hlt510940914 _Hlt510940916 _Hlt510940918 _Hlt510940934 _Hlt510940936 _Toc310070537 _Toc314543109 _Toc314562811 _Toc315072612 _Ref315165308 _Toc315587159 _Toc315587879 _Toc315685084 _Toc315688347 _Toc315847120 _Toc315847621 _Toc315848723 _Toc315849126 _Toc315858944 _Toc315861736 _Toc316189743 _Toc316280393 _Toc316450430 _Toc318707237 _Toc318872411 _Toc318881014 _Toc320692783 _Toc320693551 _Toc321291747 _Toc321294954 _Toc322364298 _Toc322364640 _Toc322364924 _Toc322728776 _Toc322980385 _Toc323506585 _Ref323657135 _Ref323657146 _Toc323677611 _Toc325167076 _Toc325219804 _Ref325669664 _Ref325669669 _Toc325743978 _Toc325849749 _Toc326050243 _Toc326122782 _Toc334261381 _Ref335734121 _Toc342648019 _Toc353613429 _Toc353613757 _Toc353614057 _Toc353949810 _Ref417802782 _Ref417802789 _Ref417890581 _Ref417890587 _Hlt510940939 _Hlt510940942 _Hlt510940946 _Hlt510940948 _Ref342104171 _Ref326118869 _Toc342648313 _Toc353755563 _Toc501775005 _Toc501775485 _Toc501775733 _Toc501780335 _Toc509733967table38 _Hlt510594206 _Hlt510594207 _Hlt510596950 _Hlt510940951 _Hlt510940955 _Hlt510940957 _Hlt510596983 _Hlt510940961 _Hlt510940964 _Hlt510941491 _Hlt510941494 _Hlt510941497 _Hlt510941500 _Hlt510941503 _Hlt510941506 _Hlt510941511 _Hlt510941531 _Ref326118887 _Toc342648314 _Toc353755564 _Ref417885184 _Toc501775006 _Toc501775486 _Toc501775734 _Toc501780336 _Toc509733968table39 _Hlt510597074 _Hlt510941575 _Hlt510941580 _Hlt510941582 _Hlt510941585 _Hlt510941588 _Hlt510941806 _Hlt510941810 _Ref334256484 _Toc334261424 _Toc342648063 _Toc353613473 _Toc353613801 _Toc353614105 _Toc353949855 _Toc353952993 _Toc354286946 _Toc418475101 _Hlt510941648 _Hlt510941813 _Hlt510941607 _Hlt510941591 _Toc310070548 _Toc314543120 _Toc314562822 _Toc315072623 _Toc315587170 _Toc315587890 _Toc315685096 
_Toc315688359 _Toc315847132 _Toc315847633 _Toc315848735 _Toc315849138 _Toc315858956 _Toc315861748 _Toc316189755 _Ref316277343 _Toc316280405 _Toc316450442 _Toc318707249 _Ref318707590 _Ref318786495 _Toc318872440 _Toc318881043 _Ref319220408 _Toc320692812 _Toc320693580 _Toc321291776 _Toc321294983 _Toc322364326 _Toc322364668 _Toc322364967 _Toc322728815 _Toc322980429 _Toc323506629 _Ref323579538 _Ref323591411 _Toc323677656 _Toc325167121 _Toc325219849 _Toc325744023 _Ref325849156 _Toc325849794 _Toc326050288 _Toc326122828 _Ref333721081 _Toc334261430 _Toc342648065 _Toc353613475 _Toc353613803 _Toc353614107 _Toc353949857 _Toc353952995 _Toc354286948 _Ref417892428 _Ref417892444 _Toc418475103 _Toc318707250 _Ref318786516 _Toc318872441 _Toc318881044 _Ref319220423 _Toc320692813 _Toc320693581 _Toc321291777 _Toc321294984 _Toc322364327 _Toc322364669 _Toc322364971 _Toc322728819 _Toc322980433 _Toc323506633 _Ref323579493 _Toc323677660 _Toc325167125 _Toc325219853 _Toc325744027 _Toc325849798 _Toc326050292 _Toc326122832 _Ref333721088 _Toc334261434 _Toc342648066 _Toc353613476 _Toc353613804 _Toc353614108 _Toc353949858 _Toc353952996 _Toc354286949 _Toc418475104 _Hlt510941594 _Hlt510941816 _Toc320692814 _Toc320693582 _Toc321291778 _Toc321294985 _Toc322364328 _Toc322364670 _Toc322364972 _Toc322728820 _Toc322980434 _Toc323506634 _Toc323677661 _Toc325167126 _Toc325219854 _Toc325744028 _Toc325849799 _Toc326050293 _Toc326122833 _Ref333721099 _Toc334261435 _Toc342648067 _Toc353613477 _Toc353613805 _Toc353614109 _Toc353949859 _Toc353952997 _Toc354286950 _Toc418475105 _Hlt510941819 _Hlt510941822 _Hlt510941826 _Toc310070554 _Toc314543126 _Toc314562828 _Toc315072629 _Toc315587176 _Toc315587896 _Toc315685102 _Toc315688365 _Toc315847138 _Toc315847639 _Toc315848741 _Toc315849144 _Toc315858962 _Toc315861754 _Ref316100029 _Toc316189761 _Ref316277776 _Toc316280411 _Toc316450448 _Toc318707256 _Toc318872447 _Toc318881050 _Toc320692820 _Toc320693588 _Toc321291784 _Toc321294991 _Toc322364334 _Toc322364676 
_Toc322364986 _Toc322728834 _Toc322980448 _Toc323506648 _Ref323579814 _Ref323592377 _Toc323677675 _Toc325167140 _Toc325219868 _Toc325744042 _Toc325849813 _Ref326040829 _Toc326050307 _Toc326122847 _Ref333721131 _Toc334261447 _Toc342648078 _Toc353613488 _Toc353613816 _Toc353614120 _Toc353949871 _Toc353953001 _Toc354286954 _Toc418475109 _Hlt510941829 _Hlt510941832 _Hlt510941834 _Hlt510941837 _Hlt510941839 _Ref326118902 _Toc342648315 _Toc353755565 _Toc501775007 _Toc501775487 _Toc501775735 _Toc501780337 _Toc509733969table310 _Hlt510597202 _Hlt510941844 _Hlt510941847 _Hlt510941849 _Hlt510941852 _Hlt510941854 _Hlt510941858 _Toc315858971 _Toc315861763 _Toc316189770 _Toc316280420 _Toc316450457 _Toc318707265 _Toc318872456 _Toc318881059 _Toc320692829 _Toc320693597 _Toc321291793 _Toc321295000 _Toc322364343 _Toc322364685 _Toc322365007 _Toc322728855 _Toc322980469 _Toc323506669 _Toc323677696 _Toc325167161 _Toc325219889 _Toc325744063 _Toc325849832 _Toc326050326 _Toc326122866 _Toc334261466 _Toc342648097 _Toc353613507 _Toc353613835 _Toc353614139 _Toc353949890 _Toc353953010 _Toc354286963 _Toc418475118 _Hlt510941861 _Toc315858972 _Toc315861764 _Toc316189771 _Toc316280421 _Toc316450458 _Toc318707266 _Toc318872457 _Toc318881060 _Toc320692830 _Toc320693598 _Toc321291794 _Toc321295001 _Toc322364344 _Toc322364686 _Toc322365008 _Toc322728856 _Toc322980470 _Toc323506670 _Ref323674616 _Toc323677697 _Toc325167162 _Toc325219890 _Toc325744064 _Toc325849833 _Toc326050327 _Toc326122867 _Toc334261467 _Toc342648098 _Toc353613508 _Toc353613836 _Toc353614140 _Toc353949891 _Toc353953011 _Toc354286964 _Toc418475119 _Hlt510941864 _Toc310070574 _Toc314543146 _Toc314562848 _Toc315072649 _Toc315587196 _Toc315587916 _Toc315685122 _Toc315688385 _Toc315847158 _Toc315847659 _Toc315848751 _Toc315849153 _Toc315858973 _Toc315861765 _Toc316189772 _Ref316206828 _Ref316280008 _Ref316280058 _Toc316280422 _Toc316450459 _Toc318707267 _Toc318872458 _Toc318881061 _Ref319228129 _Toc320692831 _Toc320693599 _Toc321291795 
_Toc321295002 _Toc322364345 _Toc322364687 _Toc322365009 _Toc322728857 _Toc322980471 _Toc323506671 _Ref323581350 _Ref323652373 _Toc323677698 _Toc325167163 _Toc325219891 _Toc325744065 _Toc325849834 _Toc326050328 _Toc326122868 _Toc334261468 _Toc342648099 _Toc353613509 _Toc353613837 _Toc353614141 _Toc353949892 _Toc353953012 _Toc354286965 _Toc418475120 _Hlt510941867 _Hlt510941870 _Hlt510941873 _Hlt510941875 _Hlt510941878 _Hlt510941880 _Hlt510941882 _Ref326118931 _Toc342648316 _Toc353755566 _Toc501775008 _Toc501775488 _Toc501775736 _Toc501780338 _Toc509733970table311 _Toc334261389 _Toc342648027 _Toc353613437 _Toc353613765 _Toc353614065 _Toc353949819 _Toc353952967 _Toc354286920 _Toc418475075 _Hlt510941886 _Hlt510941889 _Hlt510941892 _Hlt510941925 _Hlt510941912 _Hlt510941961 _Toc315587150 _Toc315587871 _Toc315685076 _Toc315688339 _Toc315847112 _Toc315847613 _Toc315848715 _Toc315849118 _Toc315858936 _Toc315861728 _Toc316189735 _Ref316206984 _Toc316280385 _Toc316450422 _Toc318707229 _Toc318872421 _Toc318881024 _Toc320692793 _Toc320693561 _Toc321291757 _Toc321294964 _Toc322364307 _Toc322364649 _Toc322364933 _Toc322728785 _Toc322980399 _Toc323506599 _Ref323581852 _Toc323677625 _Toc325167090 _Toc325219818 _Toc325743992 _Toc325849763 _Toc326050257 _Toc326122796 _Ref333641394 _Toc334261392 _Toc342648030 _Toc353613440 _Toc353613768 _Toc353614068 _Toc353949822 _Toc353952970 _Toc354286923 _Ref417802510 _Ref417802515 _Toc418475078 _Hlt510941973 _Hlt510941928 _Hlt510941906 _Hlt510257651 _Hlt510942011 _Hlt510941976 _Hlt510942015 _Hlt510942018 _Hlt510942021 _Hlt510942023 _Toc310070580 _Toc314543152 _Toc314562854 _Toc315072654 _Toc315587201 _Toc315587921 _Toc315685127 _Toc315688390 _Toc315847163 _Toc315847664 _Toc315848756 _Toc315849158 _Toc315858980 _Toc315861772 _Toc316189778 _Ref316207066 _Toc316280428 _Toc316450465 _Toc318707273 _Toc318872464 _Toc318881067 _Toc320692837 _Toc320693605 _Toc321291801 _Toc321295008 _Toc322364351 _Toc322364693 _Toc322365015 _Toc322728862 _Toc322980476 
_Toc323506676 _Ref323581748 _Toc323677703 _Toc325167168 _Toc325219896 _Toc325744070 _Toc325849839 _Toc326050333 _Toc326122873 _Toc334261473 _Toc342648104 _Toc353613514 _Toc353613842 _Toc353614146 _Toc353949897 _Toc353953017 _Toc354286970 _Toc418475125 _Hlt510942027 _Toc310070582 _Toc314543154 _Toc314562856 _Toc315072655 _Toc315587202 _Toc315587922 _Toc315685128 _Toc315688391 _Toc315847164 _Toc315847665 _Toc315848757 _Toc315849159 _Toc315858981 _Toc315861773 _Toc316189779 _Toc316280429 _Toc316450466 _Toc318707274 _Toc318872465 _Toc318881068 _Toc320692838 _Toc320693606 _Toc321291802 _Toc321295009 _Toc322364352 _Toc322364694 _Toc322365016 _Toc322728863 _Toc322980477 _Toc323506677 _Toc323677704 _Toc325167169 _Toc325219897 _Toc325744071 _Toc325849840 _Toc326050334 _Toc326122874 _Toc334261474 _Toc342648105 _Toc353613515 _Toc353613843 _Toc353614147 _Toc353949898 _Toc353953018 _Toc354286971 _Toc418475126 _Hlt510942029 _Ref326118952 _Toc342648317 _Toc353755567 _Toc501775009 _Toc501775489 _Toc501775737 _Toc501780339 _Toc509733971table312 _Ref326057831 _Ref326118812 _Ref326118966 _Toc342648318 _Toc353755568 _Ref417799657 _Toc501775010 _Toc501775490 _Toc501775738 _Toc501780340 _Toc509733972table313 _Hlt509734533 _Hlt509734542 _Hlt510940885 _Hlt510940898 _Hlt510942062 _Hlt510942065_4._SAFETY_CRITICAL_4._SAFETY_CRITICAL_1 _Toc510937369 _Toc353613418 _Toc353613746 _Toc353614046 _Toc353949799 _Toc353952954 _Toc354286907 _Ref417885069 _Ref417885073 _Ref417885076 _Toc418475061 _Toc510937370 _Toc501775011 _Toc501775491 _Toc501775739 _Toc501780341 _Toc509733973 _Hlt509733985table41 _Ref417803688 _Ref417803692 _Ref417803693 _Ref417885493 _Ref417885496 _Ref417885498 _Toc418475062_4.2_Software_Requirements _Toc510937371 _Hlt509731896 _Hlt510942072 _Toc353613420 _Toc353613748 _Toc353614048 _Toc353949801 _Toc353952956 _Toc354286909 _Ref417802170 _Ref417802200 _Ref417802202 _Ref417802239 _Ref417802869 _Ref417802873 _Ref417802877 _Ref417876834 _Ref417876838 _Ref417876840 
_Toc418475063_4.2.1_Development_of _Toc510937372 _Hlt510942077 _Hlt509731890 _Toc353613421 _Toc353613749 _Toc353614049 _Toc353949802_4.2.1.1_Safety_Requirements _Toc510937373 _Hlt509731884 _Hlt510942082 _Toc353613422 _Toc353613750 _Toc353614050_4.2.2__Generic _Toc510937374 _Toc353949803 _Toc353952957 _Toc354286910 _Ref417799932 _Ref417799946 _Ref417799952 _Ref417876771 _Ref417876775 _Ref417876778 _Toc418475064 _Hlt509731877 _Hlt510942087 _Toc310070534 _Toc314543106 _Toc314562808 _Toc315072603 _Toc315587142 _Toc315587863 _Ref315600924 _Toc315685065 _Toc315688328 _Toc315847101 _Toc315847602 _Toc315848704 _Toc315849107 _Toc315858925 _Toc315861717 _Toc316189723 _Toc316280373 _Toc316450410 _Toc318707217 _Toc318872408 _Toc318881011 _Toc320692780 _Toc320693548 _Toc321291744 _Toc321294951 _Toc322364920 _Toc322728772 _Toc322980381 _Toc323506581 _Ref323654571 _Ref323654578 _Ref323662251 _Toc323677607 _Toc325167072 _Toc325219800 _Toc325743974 _Toc325849745 _Toc326050239 _Toc326122778 _Toc334261375 _Toc342648013 _Toc353613423 _Toc353613751 _Toc353614051 _Toc353949804 _Ref417801627 _Toc510937375 _Hlt510942096 _Hlt509731868 _Toc322364921 _Toc322728773 _Toc322980382 _Toc323506582 _Ref323656867 _Ref323656873 _Ref323661465 _Toc323677608 _Toc325167073 _Toc325219801 _Toc325743975 _Toc325849746 _Toc326050240 _Toc326122779 _Toc334261376 _Toc342648014 _Toc353613424 _Toc353613752 _Toc353614052 _Toc353949805 _Ref417803243 _Ref417889945 _Ref417889948_4.2.2.3__Hazardous _Toc510937376_4.2.2.2__Hazardous _Hlt510942099 _Hlt509731865 _Hlt509282707 _Toc315685067 _Toc315688330 _Toc315847103 _Toc315847604 _Toc315848706 _Toc315849109 _Toc315858927 _Toc315861719 _Toc316189725 _Toc316280375 _Toc316450412 _Toc318707219 _Toc318872410 _Toc318881013 _Toc320692782 _Toc320693550 _Toc321291746 _Toc321294953 _Toc322364923 _Toc322728775 _Toc322980384 _Toc323506584 _Ref323659233 _Ref323659239 _Toc323677610 _Toc325167075 _Toc325219803 _Toc325743977 _Toc325849748 _Toc326050242 _Toc326122781 _Toc334261378 
_Toc342648016 _Toc353613426 _Toc353613754 _Toc353614054 _Toc353949807 _Ref417801158 _Ref417801161 _Ref417805404 _Toc510937377 _Hlt509731858 _Hlt510942104_4.2.4_Formal_Methods_4.2.3_Formal_Methods _Toc510937378 _Toc510937379 _Toc510937380 _4.2.5__Model _4.2.4__Model _Toc510937381 _Toc510937382 _Hlt501423997 _Toc510937383 _Toc510937384 _Toc310070538 _Toc314543110 _Toc314562812 _Toc315072613 _Ref315252186 _Toc315587160 _Toc315587880 _Toc315685085 _Toc315688348 _Toc315847121 _Toc315847622 _Toc315848724 _Toc315849127 _Toc315858945 _Toc315861737 _Toc316189744 _Ref316276655 _Toc316280394 _Toc316450431 _Toc318707238 _Toc318872429 _Toc318881032 _Toc320692801 _Toc320693569 _Toc321291765 _Toc321294972 _Toc322364315 _Toc322364657 _Toc322364950 _Toc322728798 _Toc322980412 _Toc323506612 _Ref323586637 _Ref323674179 _Toc323677639 _Toc325167104 _Toc325219832 _Toc325744006 _Toc325849777 _Toc326050271 _Toc326122810 _Toc334261382 _Toc342648020 _Toc353613430 _Toc353613758 _Toc353614058 _Toc353949811 _Toc353952959 _Toc354286912 _Ref417803192 _Ref417803194 _Ref417892061 _Ref417892089 _Ref417892095 _Toc418475066_4.2.6_Formal_Inspections_4.2.5_Formal_Inspections _Toc510937385 _Hlt510942124 _Hlt509734612 _Ref325874461 _Toc342648319 _Toc501775012 _Toc501775492 _Toc501775740 _Toc501780342 _Toc509733974table42 _Hlt509734717 _Hlt510942126 _Hlt510943125 _Toc325743979 _Toc325849750 _Toc326050244 _Ref326055118 _Toc326122783 _Ref333637299 _Toc334261383 _Ref334501152 _Ref315586707 _Toc315587144 _Toc315587865 _Toc315685068 _Toc315688331 _Toc315847104 _Toc315847605 _Toc315848707 _Toc315849110 _Toc315858928 _Toc315861720 _Toc316189726 _Toc316280376 _Toc316450413 _Toc318707220 _Toc318872412 _Toc318881015 _Toc320692784 _Toc320693552 _Toc321291748 _Toc321294955 _Toc322364299 _Toc322364641 _Toc322364925 _Toc322728777 _Toc322980386 _Toc323506586 _Toc323677612 _Toc325167077 _Toc325219805 _Ref334501298 _Toc342648021 _Toc353613431 _Toc353613759 _Toc353614059 _Toc353949812 _Toc353952960 _Toc354286913 
_Ref417800929 _Ref417800998 _Ref417885569 _Ref417885572 _Ref417885574 _Ref417889122 _Ref417889126 _Toc418475067_4.2.7_Test_Planning _Toc510937386_4.3_Architectural_Design _Toc510937387 _Toc353949813 _Toc353952961 _Toc354286914 _Toc418475068 _Toc510937388 _Toc510937389 _Toc510937390 _Hlt509732564 _Toc510937391 _Toc334261379 _Toc342648017 _Toc353613427 _Toc353613755 _Toc353614055 _Toc353949808 _Toc353952958 _Toc354286911 _Toc418475065 _Toc510937392_4.2.3.1_Object_Oriented _Toc510937393 _Toc510937394 _Toc342648022 _Toc353613432 _Toc353613760 _Toc353614060 _Toc353949814 _Toc353952962 _Toc354286915 _Toc418475069_4.3.2_Selection_of _Toc510937395 _Hlt510594403 _Hlt510594890 _Hlt510594659 _Hlt510942144 _Hlt510257218 _Toc418475070_4.3.3_Selection_of _Toc510937396 _Hlt509734641 _Toc310070535 _Toc314543107 _Toc314562809 _Toc315072604 _Toc315587143 _Toc315587864 _Toc315685066 _Toc315688329 _Toc315847102 _Toc315847603 _Toc315848705 _Toc315849108 _Toc315858926 _Toc315861718 _Toc316189724 _Toc316280374 _Toc316450411 _Toc318707218 _Toc318872409 _Toc318881012 _Toc320692781 _Toc320693549 _Toc321291745 _Toc321294952 _Toc322364922 _Toc322728774 _Toc322980383 _Toc323506583 _Toc323677609 _Toc325167074 _Toc325219802 _Toc325743976 _Toc325849747 _Toc326050241 _Toc326122780 _Toc334261377 _Toc342648015 _Toc353613425 _Toc353613753 _Toc353614053 _Toc353949806 _Ref417803063 _Ref417803066_4.3.5_Coding_Standards _Toc510937397 _Toc510937398 _Toc315587145 _Toc315587866 _Toc315685069 _Toc315688332 _Toc315847105 _Toc315847606 _Toc315848708 _Toc315849111 _Toc315858929 _Toc315861721 _Toc316189727 _Toc316280377 _Toc316450414 _Toc318707221 _Ref318707890 _Toc318872413 _Toc318881016 _Toc320692785 _Toc320693553 _Toc321291749 _Toc321294956 _Toc322364300 _Toc322364642 _Toc322364926 _Toc322728778 _Toc322980392 _Toc323506592 _Ref323585080 _Toc323677618 _Toc325167083 _Toc325219811 _Toc325743985 _Toc325849756 _Toc326050250 _Toc326122789 _Ref333743662 _Toc334261384 _Toc342648023 _Toc353613433 _Toc353613761 
_Toc353614061 _Toc353949815 _Toc353952963 _Toc354286916 _Ref417885651 _Ref417885654 _Ref417885656 _Toc418475071 _Hlt501512539 _Toc510937399 _Toc315072598 _Toc315587135 _Toc315587856 _Toc315685058 _Ref315171324 _Toc315587146 _Toc315587867 _Toc315685072 _Toc315688335 _Toc315847108 _Toc315847609 _Toc315848711 _Toc315849114 _Toc315858932 _Toc315861724 _Toc316189730 _Toc316280380 _Toc316450417 _Toc318707224 _Toc318872416 _Toc318881019 _Toc320692788 _Toc320693556 _Toc321291752 _Toc321294959 _Toc322364302 _Toc322364644 _Toc322364928 _Toc322728780 _Toc322980394 _Toc323506594 _Toc323677620 _Toc325167085 _Toc325219813 _Toc325743987 _Toc325849758 _Toc326050252 _Toc326122791 _Toc334261386 _Toc342648024 _Toc353613434 _Toc353613762 _Toc353614062 _Toc353949816 _Toc353952964 _Toc354286917 _Toc418475072 _Toc510937400 _Toc315587147 _Toc315587868 _Toc315685073 _Toc315688336 _Toc315847109 _Toc315847610 _Toc315848712 _Toc315849115 _Toc315858933 _Toc315861725 _Toc316189731 _Toc316280381 _Toc316450418 _Toc318707225 _Toc318872417 _Toc318881020 _Toc320692789 _Toc320693557 _Toc321291753 _Toc321294960 _Toc322364303 _Toc322364645 _Toc322364929 _Toc322728781 _Toc322980395 _Toc323506595 _Toc323677621 _Toc325167086 _Toc325219814 _Toc325743988 _Toc325849759 _Toc326050253 _Toc326122792 _Toc334261387 _Toc342648025 _Toc353613435 _Toc353613763 _Toc353614063 _Toc353949817 _Toc353952965 _Toc354286918 _Toc418475073_4.5.1_Coding_Checklists _Toc510937401 _Hlt509731807 _Hlt510942151 _Hlt510942154 _Hlt509731803 _Toc315587148 _Toc315587869 _Toc315685074 _Toc315688337 _Toc315847110 _Toc315847611 _Toc315848713 _Toc315849116 _Toc315858934 _Toc315861726 _Toc316189732 _Toc316280382 _Toc316450419 _Toc318707226 _Toc318872418 _Toc318881021 _Toc320692790 _Toc320693558 _Toc321291754 _Toc321294961 _Toc322364304 _Toc322364646 _Toc322364930 _Toc322728782 _Toc322980396 _Toc323506596 _Toc323677622 _Toc325167087 _Toc325219815 _Toc325743989 _Toc325849760 _Toc326050254 _Toc326122793 _Toc334261388 _Toc342648026 _Toc353613436 
_Toc353613764 _Toc353614064 _Toc353949818 _Toc353952966 _Toc354286919 _Toc418475074_4.5.2_Defensive_Programming _Toc510937402 _Hlt510942160_4.5.3_Refactoring _Toc510937403_4.5.4_Unit_Level _Toc510937404 _Hlt510942168 _Hlt510942194 _Hlt509731793_4.6__Software _Toc510937405 _Toc315587149 _Toc315587870 _Toc315685075 _Toc315688338 _Toc315847111 _Toc315847612 _Toc315848714 _Toc315849117 _Toc315858935 _Toc315861727 _Toc316189733 _Toc316280383 _Toc316450420 _Toc318707227 _Ref318787285 _Toc318872419 _Toc318881022 _Toc320692791 _Toc320693559 _Toc321291755 _Toc321294962 _Toc322364305 _Toc322364647 _Toc322364931 _Toc322728783 _Toc322980397 _Toc323506597 _Ref323585096 _Toc323677623 _Toc325167088 _Toc325219816 _Toc325743990 _Toc325849761 _Toc326050255 _Toc326122794 _Ref333743167 _Ref333743189 _Toc334261390 _Toc342648028 _Toc353613438 _Toc353613766 _Toc353614066 _Toc353949820 _Toc353952968 _Toc354286921 _Ref417885767 _Ref417885771 _Ref417885791 _Ref417892679 _Ref417892685 _Toc418475076 _Toc310070578 _Toc314543150 _Toc314562852 _Toc315072652 _Toc315587199 _Toc315587919 _Toc315685125 _Toc315688388 _Toc315847161 _Toc315847662 _Toc315848754 _Toc315849156 _Toc315858978 _Toc315861770 _Toc316189734 _Ref316206980 _Toc316280384 _Toc316450421 _Toc318707228 _Toc318872420 _Toc318881023 _Toc320692792 _Toc320693560 _Toc321291756 _Toc321294963 _Toc322364306 _Toc322364648 _Toc322364932 _Toc322728784 _Toc322980398 _Toc323506598 _Ref323581812 _Toc323677624 _Toc325167089 _Toc325219817 _Toc325743991 _Toc325849762 _Toc326050256 _Toc326122795 _Toc334261391 _Toc342648029 _Toc353613439 _Toc353613767 _Toc353614067 _Toc353949821 _Toc353952969 _Toc354286922 _Toc418475077_4.6.1_Testing_Techniques _Toc510937406 _Toc510937407 _Hlt510942206 _Hlt509731777_4.6.3_Integration_Testing _Toc510937408 _Toc510937409 _Hlt510402889_4.6.4_System_Testing _Toc510937410_4.6.5__Software _Toc510937411_4.6.6_Regression_Testing _Hlt510942220 _Toc510937412_4.6.7_Software_Safety _Hlt509907339 _Toc316189736 _Ref316206987 
_Toc316280386 _Toc316450423 _Toc318707230 _Toc318872422 _Toc318881025 _Toc320692794 _Toc320693562 _Toc321291758 _Toc321294965 _Toc322364308 _Toc322364650 _Toc322364934 _Toc322728786 _Toc322980400 _Toc323506600 _Ref323581892 _Toc323677626 _Toc325167091 _Toc325219819 _Toc325743993 _Toc325849764 _Toc326050258 _Toc326122797 _Toc334261393 _Toc342648031 _Toc353613441 _Toc353613769 _Toc353614069 _Toc353949823 _Toc353952971 _Toc354286924 _Toc418475079_4.6.6_Test_Witnessing _Toc510937413 _Hlt509907341 _Hlt510942227_4.6.7_COTS_and _Toc315587152 _Toc315587873 _Toc315685078 _Toc315688341 _Toc315847114 _Toc315847615 _Toc315848717 _Toc315849120 _Toc315858938 _Toc315861730 _Toc316189737 _Toc316280387 _Toc316450424 _Toc318707231 _Toc318872423 _Toc318881026 _Toc320692795 _Toc320693563 _Toc321291759 _Toc321294966 _Toc322364309 _Toc322364651 _Toc322364935 _Toc322728787 _Toc322980401 _Toc323506601 _Toc323677627 _Toc325167092 _Toc325219820 _Toc325743994 _Toc325849765 _Toc326050259 _Toc326122798 _Toc334261395 _Toc342648033 _Toc353613443 _Toc353613771 _Toc353614071 _Toc353949825 _Toc353952973 _Toc354286926 _Toc418475081 _Toc510937414 _Ref333642728 _Toc334261396 _Toc342648034 _Toc353613444 _Toc353613772 _Toc353614072 _Toc353949826 _Toc353952974 _Toc354286927 _Toc418475082_4.8_Software_Operations _Toc510937415_5._SOFTWARE_SAFETY_5._SOFTWARE_SAFETY_1 _Toc510937416_5._SOFTWARE_SAFETY_2 _Toc310070524 _Toc314543096 _Toc314562798 _Toc353614074 _Toc353614075 _Toc353614076 _Toc353614077 _Hlt510942806 _Hlt510942807 _Hlt510942809 _Hlt510942812 _Hlt510942814 _Hlt510942827 _Hlt510942869 _Hlt510942831 _Hlt510942871 _Ref333645076 _Toc334261398 _Toc342648036 _Toc353613446 _Toc353613774 _Toc353614078 _Toc353949828 _Toc353952976 _Toc354286929 _Toc418475084_5.1_Software_Safety _Toc510937417 _Toc315072608 _Toc315587155 _Toc315587876 _Toc315685081 _Toc315688344 _Toc315847117 _Toc315847618 _Toc315848720 _Toc315849123 _Toc315858941 _Toc315861733 _Toc316189740 _Ref316276177 _Toc316280390 _Toc316450427 
_Toc318707234 _Ref318707825 _Ref318786876 _Toc318872426 _Toc318881029 _Toc320692798 _Toc320693566 _Toc321291762 _Toc321294969 _Toc322364312 _Toc322364654 _Toc322364938 _Toc322728790 _Toc322980404 _Toc323506604 _Ref323586247 _Ref323658535 _Ref323658540 _Toc323677630 _Toc325167095 _Ref325275871 _Ref325275878 _Toc325219823 _Toc325743997 _Toc325849768 _Toc326050262 _Toc326122801 _Toc334261399 _Ref334502564 _Ref334502568 _Toc342648037 _Toc353613447 _Toc353613775 _Toc353614079 _Toc353949829 _Toc353952977 _Toc354286930 _Ref417800760 _Toc418475085_5.1.1_Software_Safety _Toc510937418 _Hlt510942876 _Hlt509731632 _Hlt509731610 _Ref316276317 _Toc322364939 _Toc322728791 _Toc322980405 _Toc323506605 _Toc323677631 _Toc325167096 _Toc325219824 _Toc325743998 _Toc325849769 _Toc326050263 _Toc326122802 _Toc334261400 _Toc342648038 _Toc353613448 _Toc353613776 _Toc353614080 _Toc353949830_5.1.1.1_Checklists_and _Toc510937419 _Hlt510942883 _Hlt509731597 _Hlt510942886 _Toc310070531 _Toc314543103 _Toc314562805 _Toc315072609 _Toc315587156 _Toc315587877 _Toc315685082 _Toc315688345 _Toc315847118 _Toc315847619 _Toc315848721 _Toc315849124 _Toc315858942 _Toc315861734 _Toc316189741 _Ref316205291 _Ref316276368 _Toc316280391 _Toc316450428 _Toc318707235 _Toc318872427 _Toc318881030 _Toc320692799 _Toc320693567 _Toc321291763 _Toc321294970 _Toc322364313 _Toc322364655 _Toc322364940 _Toc322728792 _Toc322980406 _Toc323506606 _Ref323578929 _Ref323586340 _Ref323658395 _Ref323659391 _Ref323661709 _Ref323661714 _Toc323677632 _Toc325167097 _Ref325273608 _Toc325219825 _Toc325743999 _Toc325849770 _Toc326050264 _Ref326054674 _Ref326054688 _Ref326054758 _Toc326122803 _Toc334261401 _Toc342648039 _Toc353613449 _Toc353613777 _Toc353614081 _Toc353949831 _Toc353952978 _Toc354286931 _Ref417789567 _Ref417789574 _Ref417789579 _Ref417803321 _Ref417887281 _Ref417887283 _Ref417889334 _Toc418475086_5.1.2_Requirements_Criticality _Toc510937420 _Hlt510943067 _Hlt510943057 _Hlt510943023 _Hlt509731589 _Hlt510943079 _Toc323677633 
_Toc325167098 _Toc325219826 _Toc325744000 _Toc325849771 _Toc326050265 _Toc326122804 _Ref333040078 _Ref333040086 _Ref333040091 _Toc334261402 _Toc342648040 _Toc353613450 _Toc353613778 _Toc353614082 _Toc353949832 _Ref417803140 _Ref417803142_5.1.2.1_Critical_Software _Toc510937421 _Hlt510943055 _Hlt510943101 _Hlt496947073 _Hlt510943083 _Ref326054577 _Ref326054603 _Toc342648320 _Toc353755569 _Toc501775013 _Toc501775493 _Toc501775741 _Toc501780343 _Toc509733975 _Hlt509733992table51 _Hlt509736747 _Toc310070532 _Toc314543104 _Toc314562806 _Toc315072610 _Toc310070536 _Toc314543108 _Toc314562810 _Toc315072611 _Ref315165255 _Ref315170686 _Toc315587158 _Toc315587878 _Toc315685083 _Toc315688346 _Toc315847119 _Toc315847620 _Toc315848722 _Toc315849125 _Toc315858943 _Toc315861735 _Toc316189742 _Ref316205419 _Ref316276592 _Toc316280392 _Toc316450429 _Toc318707236 _Toc318872428 _Toc318881031 _Toc320692800 _Toc320693568 _Toc321291764 _Toc321294971 _Toc322364314 _Toc322364656 _Toc322364942 _Toc322728793 _Toc322980407 _Toc323506607 _Ref323578965 _Ref323586488 _Ref323655219 _Toc323677634 _Toc325167099 _Toc325219827 _Toc325744001 _Toc325849772 _Toc326050266 _Toc326122805 _Toc334261403 _Toc342648041 _Toc353613451 _Toc353613779 _Toc353614083 _Toc353949833 _Toc353952979 _Toc354286932 _Hlt509731517 _Hlt510943107 _Hlt496947023 _Hlt496947024 _Hlt510943111 _Hlt501778533 _Hlt509731508 _Ref417713971 _Toc418475087_5.1.3_Specification_Analysis _Toc510937422 _Toc322364944 _Toc322728795 _Toc322980409 _Toc323506609 _Toc323677636 _Toc325167101 _Toc325219829 _Toc325744003 _Toc325849774 _Toc326050268 _Toc326122807 _Toc334261405 _Toc342648043 _Toc353613453 _Toc353613781 _Toc353614085 _Toc353949835 _Toc510937423 _Toc322364945 _Toc322728796 _Toc322980410 _Toc323506610 _Toc323677637 _Toc325167102 _Toc325219830 _Toc325744004 _Toc325849775 _Toc326050269 _Toc326122808 _Toc334261406 _Toc342648044 _Toc353613454 _Toc353613782 _Toc353614086 _Toc353949836 _Toc510937424 _Toc353613455 _Toc353613783 _Toc353614087 
_Toc353949837 _Toc510937425 _Toc342648046 _Toc353613456 _Toc353613784 _Toc353614088 _Toc353949838 _Toc353952980 _Toc354286933 _Ref417802966 _Ref417802970 _Ref417802971 _Toc418475088_5.1.4_Formal_Inspections _Toc510937426 _Hlt510943117 _Hlt510943120 _Hlt509734715 _Hlt510943123 _Hlt509731489 _Toc310070551 _Toc314543123 _Toc314562825 _Toc315072626 _Toc315587173 _Toc315587893 _Toc315685086 _Toc315688349 _Toc315847122 _Toc315847623 _Toc315848725 _Toc315849128 _Toc315858946 _Toc315861738 _Toc316189745 _Toc316280395 _Toc316450432 _Toc318707239 _Toc318872430 _Toc318881033 _Toc320692802 _Toc320693570 _Toc321291766 _Toc321294973 _Toc322364316 _Toc322364658 _Toc322364951 _Toc322728799 _Toc322980413 _Toc323506613 _Ref323592131 _Ref323592139 _Ref323654306 _Ref323654400 _Ref323654406 _Ref323654415 _Toc323677640 _Toc325167105 _Toc325219833 _Toc325744007 _Toc325849778 _Toc326050272 _Toc326122811 _Ref333731742 _Toc334261408 _Toc342648047 _Toc353613457 _Toc353613785 _Toc353614089 _Toc353949839 _Toc353952981 _Toc354286934 _Ref417890117 _Ref417890120 _Toc418475089_5.1.5_Timing,_Throughput _Toc510937427 _Toc326122812 _Toc334261409 _Toc342648048 _Toc353613458 _Toc353613786 _Toc353614090 _Toc353949840 _Toc353952982 _Toc354286935 _Toc418475090_5.1.6_Software_Fault _Toc510937428 _Hlt510943130 _Hlt510943150 _Toc510937429 _Hlt509731476 _Hlt510943154 _Hlt510943158 _Hlt509731471 _Toc310070539 _Toc314543111 _Toc314562813 _Toc315072614 _Toc315587161 _Toc315587881 _Toc315685087 _Toc315688350 _Toc315847123 _Toc315847624 _Toc315848726 _Toc315849129 _Toc315858947 _Toc315861739 _Toc316189746 _Toc316280396 _Toc316450433 _Toc318707240 _Toc318872431 _Toc318881034 _Toc320692803 _Toc320693571 _Toc321291767 _Toc321294974 _Toc322364317 _Toc322364659 _Toc322364952 _Toc322728800 _Toc322980414 _Toc323506614 _Ref323585232 _Toc323677641 _Toc325167106 _Toc325219834 _Toc325744008 _Toc325849779 _Ref326040341 _Ref326040429 _Toc326050273 _Ref326055163 _Toc326122813 _Ref333644703 _Toc334261410 _Ref334512586 
_Ref334512730 _Toc342648049 _Toc353613459 _Toc353613787 _Toc353614091 _Toc353949841 _Toc353952983 _Toc354286936 _Ref417886103 _Ref417886106 _Ref417886107 _Ref417889194 _Ref417889198 _Toc418475091_5.2_Architectural_Design _Toc510937430 _Hlt509731465 _Hlt510943161 _Hlt510943165 _Hlt509731459 _Toc310070540 _Toc314543112 _Toc314562814 _Toc315072615 _Toc315587162 _Toc315587882 _Toc315685088 _Toc315688351 _Toc315847124 _Toc315847625 _Toc315848727 _Toc315849130 _Toc315858948 _Toc315861740 _Toc316189747 _Ref316206130 _Ref316277024 _Toc316280397 _Toc316450434 _Toc318707241 _Toc318872432 _Toc318881035 _Toc320692804 _Toc320693572 _Toc321291768 _Toc321294975 _Toc322364318 _Toc322364660 _Toc322364953 _Toc322728801 _Toc322980415 _Toc323506615 _Ref323579036 _Ref323590821 _Ref323661731 _Ref323661745 _Toc323677642 _Toc325167107 _Toc325219835 _Toc325744009 _Toc325849780 _Toc326050274 _Toc326122814 _Toc334261411 _Toc342648050 _Toc353613460 _Toc353613788 _Toc353614092 _Toc353949842 _Toc353952984 _Toc354286937 _Ref417789341 _Ref417789427 _Ref417889848 _Toc418475092_5.2.1_Update_Criticality _Toc510937431 _Hlt510943168 _Hlt510943173 _Hlt510257592 _Hlt510943176 _Hlt510943179 _Toc310070541 _Toc314543113 _Toc314562815 _Toc315072616 _Toc315587163 _Toc315587883 _Toc315685089 _Toc315688352 _Toc315847125 _Toc315847626 _Toc315848728 _Toc315849131 _Toc315858949 _Toc315861741 _Toc316189748 _Ref316206145 _Ref316277053 _Toc316280398 _Toc316450435 _Toc318707242 _Toc318872433 _Toc318881036 _Toc320692805 _Toc320693573 _Toc321291769 _Toc321294976 _Toc322364319 _Toc322364661 _Toc322364954 _Toc322728802 _Toc322980416 _Toc323506616 _Ref323579108 _Ref323590838 _Toc323677643 _Toc325167108 _Toc325219836 _Toc325744010 _Toc325849781 _Toc326050275 _Toc326122815 _Toc334261412 _Toc342648051 _Toc353613461 _Toc353613789 _Toc353614093 _Toc353949843 _Toc353952985 _Toc354286938 _Toc418475093_5.2.2_Conduct_Hazard _Toc510937432 _Hlt509731449 _Hlt510943182 _Hlt509731445 _Hlt510943185 _Hlt509731419 _Hlt510943190 
_Toc310070542 _Toc314543114 _Toc314562816 _Toc315072617 _Toc315587164 _Toc315587884 _Toc315685090 _Toc315688353 _Toc315847126 _Toc315847627 _Toc315848729 _Toc315849132 _Toc315858950 _Toc315861742 _Toc316189749 _Ref316206151 _Ref316277081 _Toc316280399 _Toc316450436 _Toc318707243 _Toc318872434 _Toc318881037 _Toc320692806 _Toc320693574 _Toc321291770 _Toc321294977 _Toc322364320 _Toc322364662 _Toc322364955 _Toc322728803 _Toc322980417 _Toc323506617 _Ref323579163 _Ref323590845 _Toc323677644 _Toc325167109 _Toc325219837 _Toc325744011 _Toc325849782 _Toc326050276 _Toc326122816 _Toc334261413 _Toc342648052 _Toc353613462 _Toc353613790 _Toc353614094 _Toc353949844 _Toc353952986 _Toc354286939 _Toc418475094_5.2.3_Analyze_Architectural _Toc510937433 _Hlt510943195 _Hlt509731412 _Toc322364956 _Toc322728804 _Toc322980418 _Toc323506618 _Toc323677645 _Toc325167110 _Toc325219838 _Toc325744012 _Toc325849783 _Toc326050277 _Toc326122817 _Toc334261414 _Toc342648053 _Toc353613463 _Toc353613791 _Toc353614095 _Toc353949845 _Toc510937434 _Toc322364957 _Toc322728805 _Toc322980419 _Toc323506619 _Toc323677646 _Toc325167111 _Toc325219839 _Toc325744013 _Toc325849784 _Toc326050278 _Toc326122818 _Toc334261415 _Toc342648054 _Toc353613464 _Toc353613792 _Toc353614096 _Toc353949846 _Toc510937435 _Toc334261416 _Toc342648055 _Toc353613465 _Toc353613793 _Toc353614097 _Toc353949847 _Toc353952987 _Toc354286940 _Toc418475095 _Toc510937436 _Toc310070543 _Toc314543115 _Toc314562817 _Toc315072618 _Toc315587165 _Toc315587885 _Toc315685091 _Toc315688354 _Toc315847127 _Toc315847628 _Toc315848730 _Toc315849133 _Toc315858951 _Toc315861743 _Toc316189750 _Ref316206219 _Ref316277103 _Toc316280400 _Toc316450437 _Toc318707244 _Toc318872435 _Toc318881038 _Ref319220538 _Toc320692807 _Toc320693575 _Toc321291771 _Toc321294978 _Toc322364321 _Toc322364663 _Toc322364958 _Toc322728806 _Toc322980420 _Toc323506620 _Ref323579205 _Ref323590960 _Toc323677647 _Toc325167112 _Toc325219840 _Toc325744014 _Toc325849785 _Toc326050279 
_Toc326122819 _Toc334261417 _Toc334261998 _Toc342648056 _Toc353613466 _Toc353613794 _Toc353614098 _Toc353949848!_5.2.4.1_Interdependence_Analysis _Toc510937437 _Toc310070544 _Toc314543116 _Toc314562818 _Toc315072619 _Toc315587166 _Toc315587886 _Toc315685092 _Toc315688355 _Toc315847128 _Toc315847629 _Toc315848731 _Toc315849134 _Toc315858952 _Toc315861744 _Toc316189751 _Ref316206222 _Ref316277211 _Toc316280401 _Toc316450438 _Toc318707245 _Toc318872436 _Toc318881039 _Ref319220542 _Toc320692808 _Toc320693576 _Toc321291772 _Toc321294979 _Toc322364322 _Toc322364664 _Toc322364959 _Toc322728807 _Toc322980421 _Toc323506621 _Ref323581123 _Ref323590969 _Toc323677648 _Toc325167113 _Toc325219841 _Toc325744015 _Toc325849786 _Toc32605028      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnpqrstuvwxyz{|}~0 _Toc326122820 _Toc334261418_5.2.4.2__Independence _Toc342648057 _Toc353613467 _Toc353613795 _Toc353614099 _Toc353949849 _Toc510937438 _Hlt509288780 _Hlt510943200 _Hlt509731389_5.2.5_Update_Timing, _Toc510937439_5.2.6_Update_Software _Toc510937440_5.2.7_Formal_Inspections _Toc510937441 _Hlt510943206 _Hlt509731383_5.2.8__Formal _Toc510937442_5.3_Detailed_Design _Toc353613468 _Toc353613796 _Toc353614100 _Toc353949850 _Toc353952988 _Toc354286941 _Ref354960293 _Ref417886154 _Ref417886156 _Ref417886160 _Ref417892160 _Ref417892167 _Ref417892171 _Toc418475096 _Toc510937443 _Hlt509731372 _Hlt510943289 _Toc310070546 _Toc314543118 _Toc314562820 _Toc315072621 _Toc315587168 _Toc315587888 _Toc315685094 _Toc315688357 _Toc315847130 _Toc315847631 _Toc315848733 _Toc315849136 _Toc315858954 _Toc315861746 _Toc316189753 _Ref316206309 _Toc316280403 _Toc316450440 _Toc318707247 _Toc318872438 _Toc318881041 _Toc320692810 _Toc320693578 _Toc321291774 _Toc321294981 _Toc322364324 _Toc322364666 _Toc322364961 _Toc322728809 _Toc322980423 _Toc323506623 _Ref323579332 _Toc323677650 _Toc325167115 _Toc325219843 _Toc325744017 _Toc325849788 _Toc326050282 _Toc326122822 _Ref333721050 
_Ref333738127 _Toc334261420 _Toc342648059 _Toc353613469 _Toc353613797 _Toc353614101 _Toc353949851 _Toc353952989 _Toc354286942 _Toc418475097_5.3.1_Design_Logic _Toc510937444 _Toc310070549 _Toc314543121 _Toc314562823 _Toc315072624 _Toc315587171 _Toc315587891 _Toc315685097 _Toc315688360 _Toc315847133 _Toc315847634 _Toc315848736 _Toc315849139 _Toc315858957 _Toc315861749 _Toc316189756 _Ref316206320 _Ref316206382 _Ref316277406 _Toc316280406 _Toc316450443 _Toc318707251 _Toc318872442 _Toc318881045 _Toc320692815 _Toc320693583 _Toc321291779 _Toc321294986 _Toc322364329 _Toc322364671 _Toc322364973 _Toc322728821 _Toc322980435 _Toc323506635 _Ref323579661 _Ref323591554 _Toc323677662 _Toc325167127 _Toc325219855 _Toc325744029 _Toc325849800 _Toc326050294 _Toc326122834 _Ref333721054 _Toc334261421 _Toc342648060 _Toc353613470 _Toc353613798 _Toc353614102 _Toc353949852 _Toc353952990 _Toc354286943 _Toc418475098_5.3.2_Design_Data _Toc510937445 _Toc310070550 _Toc314543122 _Toc314562824 _Toc315072625 _Toc315587172 _Toc315587892 _Toc315685098 _Toc315688361 _Toc315847134 _Toc315847635 _Toc315848737 _Toc315849140 _Toc315858958 _Toc315861750 _Toc316189757 _Ref316277539 _Toc316280407 _Toc316450444 _Toc318707252 _Toc318872443 _Toc318881046 _Toc320692816 _Toc320693584 _Toc321291780 _Toc321294987 _Toc322364330 _Toc322364672 _Toc322364974 _Toc322728822 _Toc322980436 _Toc323506636 _Ref323591696 _Toc323677663 _Toc325167128 _Toc325219856 _Toc325744030 _Toc325849801 _Toc326050295 _Toc326122835 _Ref333721060 _Toc334261422 _Toc342648061 _Toc353613471 _Toc353613799 _Toc353614103 _Toc353949853 _Toc353952991 _Toc354286944 _Toc418475099_5.3.3_Design_Interface _Toc510937446 _Toc310070552 _Toc314543124 _Toc314562826 _Toc315072627 _Toc315587174 _Toc315587894 _Toc315685100 _Toc315688363 _Toc315847136 _Toc315847637 _Toc315848739 _Toc315849142 _Toc315858960 _Ref315860714 _Toc315861752 _Toc316189759 _Ref316206386 _Ref316277660 _Toc316280409 _Toc316450446 _Toc318707254 _Toc318872445 _Toc318881048 _Toc320692818 
_Toc320693586 _Toc321291782 _Toc321294989 _Toc322364332 _Toc322364674 _Toc322364976 _Toc322728824 _Toc322980438 _Toc323506638 _Ref323579705 _Ref323591806 _Ref323674548 _Toc323677665 _Toc325167130 _Toc325219858 _Toc325744032 _Toc325849803 _Toc326050297 _Toc326122837 _Ref333721064 _Toc334261423 _Ref342201279 _Toc342648062 _Toc353613472 _Toc353613800 _Toc353614104 _Toc353949854 _Toc353952992 _Toc354286945 _Ref417892547 _Ref417892555 _Toc418475100_5.3.4_Design_Constraint _Toc510937447 _Hlt509731364 _Hlt510943295_5.3.5_Rate_Monotonic _Toc510937448_5.3.5_Design_Functional _Hlt510941615 _Toc510937449_5.3.6_Software_Element _Hlt510941658 _Toc510937450_5.3.7_Rate_Monotonic _Hlt510838215_5.3.6_Software_Fault_5.3.6_Dynamic_Flowgraph _Toc510937451_5.3.8_Dynamic_Flowgraph _Hlt510943300 _Hlt509731355_5.3.7_Markov_Modeling _Toc510937452_5.3.9_Markov_Modeling _Toc315685099 _Toc315688362 _Toc315847135 _Toc315847636 _Toc315848738 _Toc315849141 _Toc315858959 _Toc315861751 _Toc316189758 _Ref316277564 _Toc316280408 _Toc316450445 _Toc318707253 _Toc318872444 _Toc318881047 _Toc320692817 _Toc320693585 _Toc321291781 _Toc321294988 _Toc322364331 _Toc322364673 _Toc322364975 _Toc322728823 _Toc322980437 _Toc323506637 _Ref323591712 _Toc323677664 _Toc325167129 _Toc325219857 _Toc325744031 _Toc325849802 _Toc326050296 _Toc326122836 _Ref333721105 _Toc334261436 _Toc342648068 _Toc353613478 _Toc353613806 _Toc353614110 _Toc353949860 _Toc353952998 _Toc354286951 _Toc418475106_5.3.8_Measurement_of _Toc510937453_5.3.10_Measurement_of_5.3.8.1_Function_Points _Toc510937454 _Toc510937455 _Hlt510943310 _Toc310070553 _Toc314543125 _Toc314562827 _Toc315072628 _Toc315587175 _Toc315587895 _Toc315685101 _Toc315688364 _Toc315847137 _Toc315847638 _Toc315848740 _Toc315849143 _Toc315858961 _Toc315861753 _Toc316189760 _Ref316277691 _Toc316280410 _Toc316450447 _Toc318707255 _Toc318872446 _Toc318881049 _Toc320692819 _Toc320693587 _Toc321291783 _Toc321294990 _Toc322364333 _Toc322364675 _Toc322364977 _Toc322728825 
_Toc322980439 _Toc323506639 _Ref323592310 _Ref323677246 _Toc323677666 _Toc325167131 _Toc325219859 _Toc325744033 _Toc325849804 _Toc326050298 _Toc326122838 _Ref333721113 _Toc334261437 _Ref342276621 _Toc342648069 _Toc353613479 _Toc353613807 _Toc353614111 _Toc353949861 _Toc353952999 _Toc354286952 _Ref417802363 _Ref417802402 _Toc418475107_5.3.9_Selection_of _Toc510937456_5.3.11_Selection_of _Hlt510943321 _Hlt510943325_5.3.10_Formal_Methods _Toc510937457_5.3.12_Formal_Methods _Hlt501512653 _Toc310070564 _Toc314543136 _Toc314562838 _Toc315072639 _Toc315587186 _Toc315587906 _Toc315685112 _Toc315688375 _Toc315847148 _Toc315847649 _Toc315848743 _Toc315849145 _Toc315858963 _Toc315861755 _Toc316189762 _Ref316206463 _Ref316277366 _Ref316278095 _Toc316280412 _Toc316450449 _Toc318707257 _Toc318872448 _Toc318881051 _Ref319223822 _Toc320692821 _Toc320693589 _Toc321291785 _Toc321294992 _Toc322364335 _Toc322364677 _Toc322364987 _Toc322728835 _Toc322980449 _Toc323506649 _Ref323579753 _Ref323591519 _Ref323592391 _Ref323677339 _Toc323677676 _Toc325167141 _Toc325219869 _Toc325744043 _Toc325849814 _Ref326040876 _Toc326050308 _Toc326122848 _Ref333721140 _Toc334261448 _Toc342648079 _Toc353613489 _Toc353613817 _Toc353614121 _Toc353949872 _Toc353953002 _Toc354286955 _Toc418475110_5.3.11_Requirements_State _Toc510937458_5.3.13_Requirements_State _Hlt509731257 _Hlt510943338 _Hlt509731237 _Hlt510943342 _Hlt510943346 _Toc315848744 _Toc315849146 _Toc315858964 _Toc315861756 _Toc316189763 _Ref316278120 _Toc316280413 _Toc316450450 _Toc318707258 _Toc318872449 _Toc318881052 _Toc320692822 _Toc320693590 _Toc321291786 _Toc321294993 _Toc322364336 _Toc322364678 _Toc322364998 _Toc322728846 _Toc322980460 _Toc323506660 _Ref323592900 _Toc323677687 _Toc325167152 _Toc325219880 _Toc325744054 _Toc325849825 _Toc326050319 _Toc326122859 _Ref333721185 _Toc334261459 _Toc342648090 _Toc353613500 _Toc353613828 _Toc353614132 _Toc353949883 _Toc353953003 _Toc354286956 _Toc418475111_5.3.12_Formal_Inspections 
_Toc510937459_5.3.14_Formal_Inspections_5.3.13_Software_Failure _Toc510937460_5.3.15_Software_Failure _Hlt510943410 _Hlt510943353_5.3.14_Updates_to _Toc510937461_5.3.16_Updates_to _Toc310070568 _Toc314543140 _Toc314562842 _Toc315072643 _Toc315587190 _Toc315587910 _Toc315685116 _Toc315688379 _Toc315847152 _Toc315847653 _Toc315848745 _Toc315849147 _Toc315858965 _Toc315861757 _Toc316189764 _Toc316280414 _Toc316450451 _Toc318707259 _Toc318872450 _Toc318881053 _Ref319227956 _Toc320692823 _Toc320693591 _Toc321291787 _Toc321294994 _Toc322364337 _Toc322364679 _Toc322364999 _Toc322728847 _Toc322980461 _Toc323506661 _Ref323585269 _Toc323677688 _Toc325167153 _Toc325219881 _Toc325744055 _Toc325849826 _Toc326050320 _Toc326122860 _Ref333644737 _Toc334261460 _Toc342648091 _Toc353613501 _Toc353613829 _Toc353614133 _Toc353949884 _Toc353953004 _Toc354286957 _Ref417886295 _Ref417886306 _Ref417886345 _Ref417886357 _Toc418475112 _Toc510937462_5.4_Code_Analysis _Hlt510940376 _Toc310070570 _Toc314543142 _Toc314562844 _Toc315072645 _Toc315587192 _Toc315587912 _Toc315685118 _Toc315688381 _Toc315847154 _Toc315847655 _Toc315848747 _Toc315849149 _Toc315858966 _Toc315861758 _Toc316189765 _Ref316206658 _Ref316279861 _Toc316280415 _Toc316450452 _Toc318707260 _Toc318872451 _Toc318881054 _Toc320692824 _Toc320693592 _Toc321291788 _Toc321294995 _Toc322364338 _Toc322364680 _Toc322365000 _Toc322728848 _Toc322980462 _Toc323506662 _Ref323581158 _Ref323651802 _Ref323674794 _Toc323677689 _Toc325167154 _Toc325219882 _Toc325744056 _Toc325849827 _Toc326050321 _Toc326122861 _Toc334261461 _Toc342648092 _Toc353613502 _Toc353613830 _Toc353614134 _Toc353949885 _Toc353953005 _Toc354286958 _Toc418475113_5.4.1_Code_Logic _Toc510937463 _Toc315858968 _Toc315861760 _Toc316189767 _Toc316280417 _Toc316450454 _Toc318707262 _Toc318872453 _Toc318881056 _Toc320692826 _Toc320693594 _Toc321291790 _Toc321294997 _Toc322364340 _Toc322364682 _Toc322365004 _Toc322728852 _Toc322980466 _Toc323506666 _Ref323579386 _Ref323674807 
_Toc323677693 _Toc325167158 _Toc325219886 _Toc325744060 _Toc325849829 _Toc326050323 _Toc326122863 _Toc334261463 _Toc342648094 _Toc353613504 _Toc353613832 _Toc353614136 _Toc353949887 _Toc353953007 _Toc354286960 _Toc418475115_5.4.2_Code_Data _Toc510937464 _Toc310070571 _Toc314543143 _Toc314562845 _Toc315072646 _Toc315587193 _Toc315587913 _Toc315685119 _Toc315688382 _Toc315847155 _Toc315847656 _Toc315848748 _Toc315849150 _Toc315858969 _Toc315861761 _Toc316189768 _Ref316206698 _Ref316279882 _Toc316280418 _Toc316450455 _Toc318707263 _Toc318872454 _Toc318881057 _Toc320692827 _Toc320693595 _Toc321291791 _Toc321294998 _Toc322364341 _Toc322364683 _Toc322365005 _Toc322728853 _Toc322980467 _Toc323506667 _Ref323581308 _Ref323651833 _Ref323674814 _Toc323677694 _Toc325167159 _Toc325219887 _Toc325744061 _Toc325849830 _Toc326050324 _Toc326122864 _Toc334261464 _Toc342648095 _Toc353613505 _Toc353613833 _Toc353614137 _Toc353949888 _Toc353953008 _Toc354286961 _Toc418475116 _Toc310070572 _Toc314543144 _Toc314562846 _Toc315072647 _Toc315587194 _Toc315587914 _Toc315685120 _Toc315688383 _Toc315847156 _Toc315847657 _Toc315848749 _Toc315849151 _Toc315858970 _Toc315861762 _Toc316189769 _Ref316206690 _Ref316279959 _Toc316280419 _Toc316450456 _Toc318707264 _Toc318872455 _Toc318881058 _Toc320692828 _Toc320693596 _Toc321291792 _Toc321294999 _Toc322364342 _Toc322364684 _Toc322365006 _Toc322728854 _Toc322980468 _Toc323506668 _Ref323581268 _Ref323652217 _Ref323674826 _Toc323677695 _Toc325167160 _Toc325219888 _Toc325744062 _Toc325849831 _Toc326050325 _Toc326122865 _Toc334261465 _Toc342648096 _Toc353613506 _Toc353613834 _Toc353614138 _Toc353949889 _Toc353953009 _Toc354286962 _Toc418475117_5.4.3_Code_Interface _Toc510937465 _Hlt509731216 _Hlt510943418 _Hlt510941863_5.4.4_Update_Measurement _Hlt508609802 _Toc510937466_5.4.5_Update_Design _Toc510937467 _Hlt509731207 _Hlt510943422 _Hlt509731192 _Hlt510943427_5.4.6_Formal_Code _Toc510937468 _Hlt510943431 _Hlt506113283 _Hlt506113284 _Hlt510943436 
_Hlt510943487 _Hlt510943452 _Toc310070576 _Toc314543148 _Toc314562850 _Toc315072650 _Toc315587197 _Toc315587917 _Toc315685123 _Toc315688386 _Toc315847159 _Toc315847660 _Toc315848752 _Toc315849154 _Toc315858974 _Toc315861766 _Toc316189773 _Ref316206858 _Ref316280166 _Toc316280423 _Toc316450460 _Toc318707268 _Toc318872459 _Toc318881062 _Ref319223726 _Toc320692832 _Toc320693600 _Toc321291796 _Toc321295003 _Toc322364346 _Toc322364688 _Toc322365010 _Toc322728858 _Toc322980472 _Toc323506672 _Ref323581387 _Ref323652395 _Toc323677699 _Toc325167164 _Toc325219892 _Toc325744066 _Toc325849835 _Toc326050329 _Toc326122869 _Toc334261469 _Toc342648100 _Toc353613510 _Toc353613838 _Toc353614142 _Toc353949893 _Toc353953013 _Toc354286966 _Toc418475121_5.4.7_Applying_Formal _Toc510937469 _Toc310070573 _Toc314543145 _Toc314562847 _Toc315072648 _Toc315587195 _Toc315587915 _Toc315685121 _Toc315688384 _Toc315847157 _Toc315847658 _Toc315848750 _Toc315849152 _Toc315858976 _Toc315861768 _Toc316189775 _Ref316206673 _Ref316279981 _Toc316280425 _Toc316450462 _Toc318707270 _Toc318872461 _Toc318881064 _Toc320692834 _Toc320693602 _Toc321291798 _Toc321295005 _Toc322364348 _Toc322364690 _Toc322365012 _Toc322728859 _Toc322980473 _Toc323506673 _Ref323581217 _Ref323652241 _Toc323677700 _Toc325167165 _Toc325219893 _Toc325744067 _Toc325849836 _Toc326050330 _Toc326122870 _Toc334261470 _Toc342648101 _Toc353613511 _Toc353613839 _Toc353614143 _Toc353949894 _Toc353953014 _Toc354286967 _Toc418475122_5.4.8_Unused_Code _Toc510937470_5.4.9_Interrupt_Analysis _Toc510937471 _5.4.9__Final_5.4.10__Final _Toc510937472 _Hlt501512667 _Hlt509907345_5.4.11_Program_Slicing _Toc510937473_5.4.12_Update_Software _Toc510937474 _Hlt501512680 _Hlt510942861 _Toc310070577 _Toc314543149 _Toc314562851 _Toc315072651 _Toc315587198 _Toc315587918 _Toc315685124 _Toc315688387 _Toc315847160 _Toc315847661 _Toc315848753 _Toc315849155 _Toc315858977 _Toc315861769 _Toc316189776 _Toc316280426 _Toc316450463 _Toc318707271 _Ref318787313 
_Toc318872462 _Toc318881065 _Ref319223913 _Toc320692835 _Toc320693603 _Toc321291799 _Toc321295006 _Toc322364349 _Toc322364691 _Toc322365013 _Toc322728860 _Toc322980474 _Toc323506674 _Ref323585386 _Toc323677701 _Toc325167166 _Toc325219894 _Toc325744068 _Toc325849837 _Ref326040912 _Toc326050331 _Toc326122871 _Ref333644816 _Toc334261471 _Toc342648102 _Toc353613512 _Toc353613840 _Toc353614144 _Toc353949895 _Toc353953015 _Toc354286968 _Ref417886480 _Ref417886489 _Ref417886533 _Toc418475123_5.5_Test_Analysis _Toc510937475 _Hlt510943495 _Hlt509731163 _Toc310070579 _Toc314543151 _Toc314562853 _Toc315072653 _Toc315587200 _Toc315587920 _Toc315685126 _Toc315688389 _Toc315847162 _Toc315847663 _Toc315848755 _Toc315849157 _Toc315858979 _Toc315861771 _Toc316189777 _Ref316207063 _Toc316280427 _Toc316450464 _Toc318707272 _Toc318872463 _Toc318881066 _Toc320692836 _Toc320693604 _Toc321291800 _Toc321295007 _Toc322364350 _Toc322364692 _Toc322365014 _Toc322728861 _Toc322980475 _Toc323506675 _Ref323581680 _Toc323677702 _Toc325167167 _Toc325219895 _Toc325744069 _Toc325849838 _Toc326050332 _Toc326122872 _Toc334261472 _Toc342648103 _Toc353613513 _Toc353613841 _Toc353614145 _Toc353949896 _Toc353953016 _Toc354286969 _Toc418475124_5.5.1_Test_Coverage _Toc510937476_5.5.2_Formal_Inspections _Toc510937477 _Hlt509729607 _Hlt509731153 _Hlt510943500_5.5.3_Reliability_Modeling _Toc510937478 _Toc510937479 _Toc510937480 _Toc510937481 _Toc510937482 _Toc510937483 _Hlt501528284 _Hlt501528276 _Hlt501528224_5.5.4_Checklists_of _Toc510937484 _Hlt510943511 _Hlt509731136_5.5.5_Test_Results _Toc510937485_5.5.6_Independent_Verification _Toc510937486 _Toc510937487 _Hlt500759212 _Hlt500759213 _Hlt500759368 _Hlt500759369 _Hlt500759626 _Hlt500759604 _Toc310070583 _Toc314543155 _Toc314562857 _Toc315072656 _Toc315587203 _Toc315587923 _Toc315685129 _Toc315688392 _Toc315847165 _Toc315847666 _Toc315848758 _Toc315849160 _Toc315858982 _Toc315861774 _Toc316189780 _Toc316280430 _Toc316450467 _Toc318707275 _Toc318872466 
_Toc318881069 _Toc320692839 _Toc320693607 _Toc321291803 _Toc321295010 _Toc322364353 _Toc322364695 _Toc322365017 _Toc322728864 _Toc322980478 _Toc323506678 _Toc323677705 _Toc325167170 _Toc325219898 _Toc325744072 _Toc325849841 _Toc326050335 _Toc326122875 _Ref333644831 _Ref333645458 _Toc334261475 _Toc342648106 _Toc353613516 _Toc353613844 _Toc353614148 _Toc353949899 _Toc353953019 _Toc354286972 _Toc418475127_5.6_Operations_& _Toc510937488 _Hlt510943518 _Hlt509731127_6._Programming_Languages_6._Programming_Languages_1_6._SOFTWARE_DEVELOPMENT _Toc510937489 _Hlt509731288 _Hlt509718112 _Toc510937490 _Toc322364978 _Toc322728826 _Toc322980440 _Toc323506640 _Toc323677667 _Toc325167132 _Toc325219860 _Toc325744034 _Toc325849805 _Toc326050299 _Toc326122839 _Toc334261438 _Toc342648070 _Toc353613480 _Toc353613808 _Toc353614112 _Toc353949862 _Toc510937491 _Toc322364979 _Toc322728827 _Toc322980441 _Toc323506641 _Toc323677668 _Toc325167133 _Toc325219861 _Toc325744035 _Toc325849806 _Toc326050300 _Toc326122840 _Toc334261439 _Toc342648071 _Toc353613481 _Toc353613809 _Toc353614113 _Toc353949863 _Toc510937492 _Toc322364980 _Toc322728828 _Toc322980442 _Toc323506642 _Toc323677669 _Toc325167134 _Toc325219862 _Toc325744036 _Toc325849807 _Toc326050301 _Toc326122841 _Toc334261440 _Ref341848032 _Toc342648072 _Toc353613482 _Toc353613810 _Toc353614114 _Toc353949864 _Ref417890454 _Ref417890456 _Toc510937493 _Toc510937494 _Hlt500316057 _Hlt500316058 _Toc510937495 _Toc510937496 _Toc510937497 _Toc510937498 _Toc510937499 _Toc510937500 _Toc510937501 _Toc510937502 _Toc510937503 _Toc510937504 _Hlt501512694 _Toc354286953 _Toc418475108 _Toc510937505 _Toc322364985 _Toc322728833 _Toc322980447 _Toc323506647 _Toc323677674 _Toc325167139 _Toc325219867 _Toc325744041 _Toc325849812 _Toc326050306 _Toc326122846 _Toc334261446 _Toc342648077 _Toc353613487 _Toc353613815 _Toc353614119 _Toc353949869 _Toc510937506 _Toc510937507 _Hlt509288868 _Toc510937508 _Toc510937509 _Toc510937510 _Toc510937511 _Toc510937512 _Toc510937513 
_Hlt501512715 _Toc510937514 _Toc510937515 _Toc510937516 _Toc510937517 _Toc510937518 _Toc510937519 _Toc510937520_6.10__Operating _Toc510937521 _Toc510937522 _Hlt509907347 _Toc510937523 _Hlt509907349 _Toc510937524 _Toc510937525 _Hlt501512737 _Hlt498942150 _Hlt498942151_6.11_Distributed_Computing _Toc510937526 _Hlt509288885 _Hlt509288948 _Toc510937527 _Toc510937528 _Toc510937529 _Toc510937530 _Toc510937531 _Toc510937532 _Toc510937533 _Hlt510943657 _Hlt509730897 _Toc510937534 _Hlt501513143 _Toc510937535 _Toc510937536 _Hlt501512746 _Toc510937537 _Hlt510943669 _Toc510937538 _Hlt510943675 _Hlt510943679 _Hlt510943683_6.14.3_Case_Study _Toc510937539 _Toc510937540_6.14.3.2_Testing_and _Toc510937541 _Toc510937542_6.15_Good_Programming _Toc510937543 _Toc510937544 _Toc310070597 _Toc314543169 _Toc314562871 _Toc315072659 _Toc315587206 _Toc315587926 _Toc315685132 _Toc315688395 _Toc315847168 _Toc315847669 _Toc315848761 _Toc315849163 _Toc315858985 _Toc315861777 _Toc316189783 _Toc316280433 _Toc316450470 _Toc318707278 _Toc318872469 _Toc318881072 _Toc320692842 _Toc320693614 _Toc321291810 _Toc321295017 _Toc322364360 _Toc322364702 _Toc322365024 _Toc322728865 _Toc322980479 _Toc323506679 _Toc323677706 _Toc325167171 _Toc325219899 _Toc325744073 _Toc325849842 _Toc326050336 _Toc326122876 _Ref333821314 _Ref333821320 _Toc334261476 _Toc342648107 _Toc353613517 _Toc353613845 _Toc353614149 _Toc353949900 _Toc353953020 _Toc354272829 _Toc418475128_7._SOFTWARE_ACQUISITION _Hlt510256487 _Toc510937545_7._SOFTWARE_ACQUISITION_1 _Hlt510938657_7._SOFTWARE_ACQUISITION_2 _Hlt510938750 _Hlt510939547 _Hlt510939807_7._SOFTWARE_ACQUISITION_3 _Hlt510938494 _Hlt510943698 _Hlt510943701_7.1_Off-the-Shelf_Software _Toc510937546 _Hlt510943706 _Hlt510943712 _Hlt509731820 _Hlt510596466 _Toc510937547 _Hlt510243523 _Hlt510943721 _Hlt510235398 _Hlt510235676 _Hlt510943734 _Toc510937548 _Hlt510943740 _Toc510937549 _Hlt510943759 _Hlt510943762 _Toc510937550 _Hlt510500729_7.1.2.3_Adding_new _Toc510937551_7.1.2.4_Dealing_with 
_Toc510937552 _Hlt509906959 _Toc510937553 _Hlt510941980 _Hlt510943744_7.1.3_OTS_Analyses _Toc510937554_7.1.4_Who_Testscurrent _Toc510937555 _Hlt510257344"_7.2_Contractor-developed_Software _Toc510937556 _Hlt510508768 _Toc510937557 _Toc510937558 _Toc510937559 _Hlt510943800 _Hlt510943803 _Hlt510943812 _Toc510937560 _Toc510937561 _Hlt510943818 _Toc510937562 _Toc510937563 _Toc510937564 _Toc510937565_7.2.2_Monitoring_Contractor _Toc510937566 _Toc510937567_7._REFERENCES_8._REFERENCES _Toc510937568 _Hlt509906752 _Hlt509716269 _Hlt510937830 _Hlt510838200 _Hlt499015490 _Hlt499015491 _Hlt499540497 _Hlt499540498 _Hlt501177740 _Hlt501177741 _Hlt510848522 _Hlt510252464 _Hlt510594888 _Hlt501439281 _Hlt509287164 _Hlt509287165 _APPENDIX_A _Toc510937569_Glossary_of_Terms _Toc510937570 _Hlt509282743 _APPENDIX_B _APPENDIX_B_1 _Toc510937571_APPENDIX_B_Software _Toc510937572 _Toc510937573 _Toc510937574 _Toc510937575 _Toc501780262 _Toc509733466 _Toc509733911 _Hlt509290904 _Toc501780263 _Toc509733467 _Toc509733912 _Toc501780264 _Toc509733468 _Toc509733913 _APPENDIX_C _APPENDIX_C_1 _Toc510937576_APPENDIX_C_Software _Hlt509289510 _Hlt509907351 _Toc510937577 _Toc510937578 _Toc510937579 _Toc510937580 _Toc501780265 _Toc509733469 _Toc509733914 _Toc510937581 _Toc510937582 _Toc501780266 _Toc509733470 _Toc509733915 _Hlt509652968 _Hlt509653188 _Toc510937583 _Toc510937584 _Toc510937585 _Toc510937586 _Toc510937587 _Toc510937588 _Toc510937589 _Toc510937590 _Toc510937591_D.4.8_Example_forms_C.4.8_Example_forms _Toc510937592 _APPENDIX_D _Toc510937593 _Toc310070565 _Toc314543137 _Toc314562839 _Toc315072640 _Toc315587187 _Toc315587907 _Toc315685113 _Toc315688376 _Toc315847149 _Toc315847650 _Toc322364988 _Toc322728836 _Toc322980450 _Toc323506650 _Toc323677677 _Toc325167142 _Toc325219870 _Toc325744044 _Toc325849815 _Toc326050309 _Toc326122849 _Toc334261449 _Toc342648080 _Toc353613490 _Toc353613818 _Toc353614122 _Toc353949873 _Toc510937594 _Toc322729371 _Ref323673781 _Toc323678329 _Toc325167402 
_Toc325168128 _Toc325220248 _Toc325220731 _Ref325675940 _Ref325676114 _Toc325744939 _Toc325850416 _Toc326050654 _Toc342648523 _Toc353756865 _Ref417712364 _Toc418475284 _Toc501775014 _Toc501775600 _Hlt501775773 _Toc501780267 _Toc509733471 _Toc509733916 _Toc310070566 _Toc314543138 _Toc314562840 _Toc315072641 _Toc315587188 _Toc315587908 _Toc315685114 _Toc315688377 _Toc315847150 _Toc315847651 _Toc322364989 _Toc322728837 _Toc322980451 _Toc323506651 _Toc323677678 _Toc325167143 _Toc325219871 _Toc325744045 _Toc325849816 _Toc326050310 _Toc326122850 _Toc334261450 _Toc342648081 _Toc353613491 _Toc353613819 _Toc353614123 _Toc353949874 _Toc510937595 _Toc322364990 _Toc322728838 _Toc322980452 _Toc323506652 _Toc323677679 _Toc325167144 _Toc325219872 _Toc325744046 _Toc325849817 _Toc326050311 _Toc326122851 _Toc334261451 _Toc342648082 _Toc353613492 _Toc353613820 _Toc353614124 _Toc353949875 _Toc510937596 _Toc322364991 _Toc322728839 _Toc322980453 _Toc323506653 _Toc323677680 _Toc325167145 _Toc325219873 _Toc325744047 _Toc325849818 _Toc326050312 _Toc326122852 _Toc334261452 _Toc342648083 _Toc353613493 _Toc353613821 _Toc353614125 _Toc353949876 _Hlt501512791 _Toc510937597 _Ref315860941 _Toc322729372 _Toc323678336 _Toc325167410 _Toc325168129 _Toc325220255 _Toc325220732 _Toc325744940 _Toc325850417 _Toc326050655 _Toc342648524 _Toc353756866 _Toc418475285 _Toc501775015 _Toc501775601 _Toc501780268 _Toc509733472 _Toc509733917 _Toc322364992 _Toc322728840 _Toc322980454 _Toc323506654 _Toc323677681 _Toc325167146 _Toc325219874 _Toc325744048 _Toc325849819 _Toc326050313 _Toc326122853 _Toc334261453 _Toc342648084 _Toc353613494 _Toc353613822 _Toc353614126 _Toc353949877 _Toc510937598 _Toc322364993 _Toc322728841 _Toc322980455 _Toc323506655 _Toc323677682 _Toc325167147 _Toc325219875 _Toc325744049 _Toc325849820 _Toc326050314 _Toc326122854 _Toc334261454 _Toc342648085 _Toc353613495 _Toc353613823 _Toc353614127 _Toc353949878 _Toc510937599 _Toc322364994 _Toc322728842 _Toc322980456 _Toc323506656 _Toc323677683 
_Toc325167148 _Toc325219876 _Toc325744050 _Toc325849821 _Toc326050315 _Toc326122855 _Toc334261455 _Toc342648086 _Toc353613496 _Toc353613824 _Toc353614128 _Toc353949879 _Toc510937600 _Toc322364995 _Toc322728843 _Toc322980457 _Toc323506657 _Toc323677684 _Toc325167149 _Toc325219877 _Toc325744051 _Toc325849822 _Toc326050316 _Toc326122856 _Toc334261456 _Toc342648087 _Toc353613497 _Toc353613825 _Toc353614129 _Toc353949880 _Toc510937601 _Toc322364996 _Toc322728844 _Toc322980458 _Toc323506658 _Toc323677685 _Toc325167150 _Toc325219878 _Toc325744052 _Toc325849823 _Toc326050317 _Toc326122857 _Toc334261457 _Toc342648088 _Toc353613498 _Toc353613826 _Toc353614130 _Toc353949881 _Toc510937602 _Toc310070567 _Toc314543139 _Toc314562841 _Toc315072642 _Toc315587189 _Toc315587909 _Toc315685115 _Toc315688378 _Toc315847151 _Toc315847652 _Toc322364997 _Toc322728845 _Toc322980459 _Toc323506659 _Toc323677686 _Toc325167151 _Toc325219879 _Toc325744053 _Toc325849824 _Toc326050318 _Toc326122858 _Toc334261458 _Toc342648089 _Toc353613499 _Toc353613827 _Toc353614131 _Toc353949882 _Toc510937603 _APPENDIX_E _Toc510937604_E.1_Checklist_for _Toc510937605 _Hlt510235682_E.2_Generic_Software _Toc510937606 _Toc510937607_E.4_Checklist_of _Toc510937608 _Hlt510943474_E.4_Checklist_of_1 _Hlt509907353 _Hlt501512813 _Hlt507993660 _Toc510937609 _Toc510937610 _Toc510937611 _Toc510937612 _Toc510937613 _Toc510937614 _Toc510937615 _Toc510937616 _Toc510937617 _Toc501775016 _Toc501775017 _Toc501775018 _Toc501775019 _Toc501775020 _Toc501775021 _Toc501775022 _Toc510937618 _Toc501775023 _Toc501775024 _Toc501775025 _Toc501775026 _Toc501775027 _Toc501775028 _Toc501775029 _Toc510937619 _Toc501775030 _Toc501775031 _Toc501775032 _Toc501775033 _Toc501775034 _Toc501775035 _Toc501775036 _Toc510937620 _Toc501775037 _Toc501775038 _Toc501775039 _Toc501775040 _Toc501775041 _Toc501775042 _Toc501775043 _Toc510937621 _Toc501775044 _Toc501775045 _Toc501775046 _Toc501775047 _Toc501775048 _Toc501775049 _Toc501775050 _Toc510937622 
_Toc501775051 _Toc501775052 _Toc501775053 _Toc501775054 _Toc501775055 _Toc501775056 _Toc501775057 _Toc510937623 _Toc501775058 _Toc501775059 _Toc501775060 _Toc501775061 _Toc501775062 _Toc501775063 _Toc501775064LSS 1a1a?cxnnnnnnnnɓɓɓɓɓɓɓɓ`aabcdef677777777Nؚ15ţ ɦΦ\xΨpqqrrttttttttVacGG e e e e e e e e e AAAAAAAA!!!!!!!!!!!!!!!!!}%}%~%&&+'J.J.J.J.J.J.J.J.P.///555577>>@@@@@@@@@@@@@@@C1CCCCCCEEEEEEEEEEIIIIIIIIIIIK KcMcMcMdMN NNNOPBQGQHQHQHQHQHQHQHQHQHQHQHQHQHQHQHQHQHQOQQQR~RRRRUS]SSSVVVWWWWWWWW8XAXIXp]p]p]p]p]p]p]p]p]p]p]p]p]^=_`6auaa5bobxbb7cc;dddd=eueyfffJggg:hKiKiLiiiii&jnjjkkllIlllmSnSnSnSnSnSnSnSnSnSnSnSnSnSnSnSnSnSnSnSnSnSnSnSnSnSnSnSnnss>Qҋpux6  >>>>>>>>ǢǢǢǢ&(FFF55778=XhjuuuuuuuuuuuuxxFѳѳµõǵ ľľľľľľľľľ[g*.37BZwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwx*1`l%l%vt777777777777777777777777777l----------------------------------------------------`D,,,------ W/KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK7B?vvvvvvvxxI.37yy)<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<======>>>>>>>>>>Shjj                  RRRRRRRRR}!}!}!}!}!}!$$ % % % % %%%%%%%%%%%"-'------------------------------------------------YDrDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDANANANANANANANANANANANANANANANANANANANANANANANANANANANANANANANANANANANANANANANANANPPddduwݓ,&;=δδδδδδδδδδδ=====          8888888888+238777!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,#1#1#1#1y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8y8x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;<<2>8>#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#D#DyGGGRR]]]bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbooooooooooooooooooooooooooooooooooooooooooooooooooԜ >W  
4YTjjjjjjjjjjjjOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOQ^^^^^^^^^^^^^^^^^^^^,1;K###T&T&T&T&U&U&U&U&U&U&U&U&''''''''''''''''''''''''''''''''''''''''''''''''''''''''k(p(r(r()))h*h*h*h*000000000000000000c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2Q4Q4Q4Q4Q4i6i6i6i6i6i6i6i6i6i6i6i6i66P9r;;;<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<PPPPPPPPPPPPVVVZZ[[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[x[o\o\__ a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a acdGeGeepgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgpgi jQjVjkkmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm"q&qqqqqqqqqqqqqqqqqqquuuuuuuuuuuuuuuuuuxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{ffŃŃ~BLYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔԔ՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟՟˧˧ѧNN}ͰѰ]]W   LL[SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS                                                   1@AA    ? A QQaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaDD!!!!!$$+++k,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,l,--N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.N.66:::::BFJMTzWWW\\]]^^aa9ggghhhuiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiOjTjlllllm #################BBvg );MMW]]ddd}}}}}}}}}}}}}}}}}}v7|y  #'*',,?<?<D<D<]]frkq}|ĄUVgn\6Y!A=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D=D?D@D@DADSDtKLNN3UZk_k_kFlHx}:^h|::;==LL$$g1!         E |H |H :L :L M M $U ] ^ +m +m +m ւ ւ d 6 6 6 6 ; [? 
:S j l l l l l l l +m +m +m Um Wm Wm Wm sm m q Ez p /    : : ֨ {  q H    > > e e e e e e e e e e e e e e e e e e e e e e e e e e e e B B B B B B B B B B B B B B B B C C C C C C 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 z z z z z z z z z z z z z z z z z z z                   ' ' ' ' ' ' ' ' ' ' ' ' ' ' ' ' ' ' H H H H H H H H H H H H H H H H H H                                     6" 6" 6" 6" 6" 6" 6" 6" 6" 6" 6" 6" 6" 6" 6" 6" 6" 6" . . . . . . . . . . . . . . . . . . . . . . . . . . . . c9 c9 n9 n9 t9 o` o` y       v  ϵ 3 q #$ O$ X$ n$ $ $ $ $ & $' -' C' g' ' ' ' ?* f* o* * * * * * - - . . D. ^. h. q. 01 X1 a1 w1 1 1 1 1 3 3 3 3 m4 4 4 4 7 7 7 7 7 8 8 8 N @@@@@@@@@ @ @ @@ @!@"@#@$@%@&@'@()*+,-./0@12@3@45@6@7@8@9@:@;@<@=@>@?@@@A@B@C@D@E@F@G@H@I@J@KVL@MNOPQRSTUWX@Y@Z@][\^_`abc@ghijklmnopqrd@e@f@stuv{|}w@x@y@z@~@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@     @@@@@ !"#$%&'()*@+@,@-@.@/@0@1@2@3@4@5@6@7@89:;<=>?@ABCDEFGHI@JKL@M@ONP@Q@R@S@T@UVWXYZ[\]^_`@a@b@hijklmnoc@d@e@f@g@p@q@r@s@t@u@v@w@x@y@z@{@|@}@~@@@@@@@@@@@@@@@@@@@@@@@@@@@@@    @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@@@@@@@@|}~ !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxIyz@{@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@     @@@@@@@@ !"#$%&@@@@'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^abcdefghijklmnopqrstuvwxyz{|}~_@`@@@@@@@@@@@@@@@@   @  !"#$%&'()*+,-./ @123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcd0@e@f@g@h@i@j@klmnopqrsuvwxyz{|}t@~@@@@@@@@@@@@@@@@     @ !"#$)*+,-./01234%@&@'@(@5@6@789:;<=>?@ABCDFGHIJE@KMNOPQRSLTU@V@XYZ[\]^_`abcdefghWij@k@mnoplqr@s@uvwtxyz{|}~@@@@@@@@@@    !"#$%&'()*+,-./0123456789:;<=>?@AB  CD@E@IJKLMNOPF@G@H@TUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~QRS@@@@@@@     @ !"#$%&'()*+,-./0123456789:;<=>?@ABCEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnDop@q@r@s@uvwxyz{|}~t@@@@     @@@@@!"#$%&'()*+,-./0123456789:;<=>?@ABC @D@EFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnoprstuvwxyz{q|}~@@@@@@@@@@@@@@@                           ! 
" # $ % & ' ( ) * + , - . / 0 1 2 @3 @4 @5 @6 @9 : ; < = > ? @ A B C D E F G H I J 7 K 8 @L @M @N @Q R S T U V W X Y O @Z P @[ \ ] ^ g h i j k l m n o p q r s t u v w x y z { | } ~  _ @` @a @b @c @d @e @ f @ @ @ @ @                          @ @ ! @" @# @$ @& ' ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ? @ A B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \ ] ^ _ % ` a @b @c @d @f g h i j k l m n o p q r s t u v w x y z { | } ~  e @ @ @ @ @ @ @ @ @ @ @             @ @             ! " # $ % & ' ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ? @ B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \ ] ^ _ ` a b c d e f g h i j k l m n o p q r A s v w x y z { | } ~  t u @ @ @ @ @ @ @                           ! " # $ % & ' ( ) * + , - . 0 1 2 3 4 5 6 7 8 9 : ; < = > ? @ A B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \ ] ^ _ ` / a c d e f g h i j k l m n o p q r s t u v w x y z { | } ~  b @ @ @ @ @ @ @ @                       @ @   @# $ % & ' ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ? @ A B C D E F G H I J K L M N O P Q R S T U V W X Y Z ! 
[ " \ @] @^ @_ @` @c d e f g h i j k l m n o p q r s t u v w x y z { | } ~  a b @ @ @       !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~Y@@@@@@@@@@@@@@     @@@@ !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOP@Q@STUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~R@@@@@@@@@@@@@@@@@@     @@ !#"@$%&'()*+,-./012345678:9@;<=>?A@@BCDEFGHIJLK@NM@OQP@R@S@TWU@V@XYZ[\]^_@`@ba@ced@fg@hi@j@k@lmnopqrstuv@wx@yz@{@|@}~@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @    @@ !"@#@$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMOPQRSTUVWXYZ[\]^_`N@abcdefghijklmnopqrstuvwxyz{|}~@      !"#$%&'()*+,-./10@234596@78@:@;@<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{MTT 2a2a@dywwwwwwww“ԓԓԓԓԓԓԓԓabbcdefg6JJJJJJJJN26ƣʦϦ]ϨpqrruuuuuuuuWbdθ??jj&666 ::::::::::G#############e e e       A\\\\\\\       !!!!!!!!!!"/"/"/"/"/"/"/"/"/"/"/"~%~%%&&]'P.s.s.s.s.s.s.s.s.///555577>>@@ACCCCCCCCCCCCCCCCCCCEEEEEEEEFFIEIEIEIEIEIEIEIEIEIEIK!KdMdMdMeM N NNNOPCQHQJQJQJQJQJQJQJQJQJQJQJQJQJQJQJQJQJQPQQQR~RRRRVS^SSSWWWWWWWWWWW9XBXJXp]p]p]p]p]]]]]]]]]^>_`7avaa6bpbybb8ccevezffgKggg;hLiLiMiiiii'jojjkkllJlllmSnnnnnnnnnnnnnnnnssQlqvyޙޙޙޙޙޙޙޙޙޙޙ`  >>jjjjjj')ƣƣƣƣƣƣmmmЧ66889>Xikuuuxx`````ѳõĵȵ ľ\\g +/48C[y$$$$$$$$$+2am&7777777777&wu---------------------------------muuuuuuuuuuuuuuuuuuuuuuuuuuuappppppppppppppppppppppppppppppppppppppppppppppppppppEUUUUUUUUU X08jjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjC@J/48CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCz*333333333333333333333333333333333333333333333333>>>>````````````Thj                  Rqqqqqqqq}!!!!!!$$ %4%4%4%4%4%4%4%4%4%4%4%4%4%4%#-(- . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 
.ZDsDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNvNPPdddu:ޓ:9ըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըըը<>δδδ÷lllll ,,,,,,,,,,8YYYYYYYYY,3487=q!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!+,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,111188888888888888888888888888888888888888888888x;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;<<3>9>#D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D)D>DzGGGRR]]]bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbboooooooooooooooooooooooooooooooooooooooooooooooooo  [X !'3ZUjjjjOPR^-2<L%%%%%%%%%%%%%%%%%%%###U&U&{&{&{&{&{&{&{&{&{&{&''''l(q(s(s()))h********************************************************111111111111111111222222222222222222u4u4u4u4u4i66666666666666Q9s;;;<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<PPPPPPPPPPPPVVWZZ[ [x[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[p\p\__ a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*acdGeHeepggggggggggggggggggggggggggggggggggggggggggggggggggggi jRjWjkkmnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn#q'qqqqqqqqqqqqqqqqqqquuuuuuuuuuuuuuuuuuxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{fGŃ…ωωωωωωωωωωωωωωωCMYttttttttttttttttttttttttttttttttttttttttttzzzzzzzzzԔ̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘̘՟˧Nk}ΰҰ)                                            ]uy  5333333333333333333333333333333333333333333333333333333333MM\ Sdddddddddddddddddddddddddddddddddddddddddddddddddddddd6                                    1@Ag    @ B 
Qa~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~D\!!!!!$$++L+k,l,},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},},--N.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.a.67::::;BFJMT{WWW\\]]^^aaHggghhviiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiPjUjlllmmm111111111111111111ƕƕƕƕƕƕƕƕƕƕƕƕƕƕƕƕƕƕƕƕǕCCw #);ZMW]]eee~~~~~~~~~~~~~~~~~~՞ٱ  L  *'I',,?<D<D<Z< ]fkq|VWn+Gx7ZD9A=D=D=D=D=D?DADADADSDXDuKLNN4U[k`k`klHx~i_}#ǫ::;=cLr$EgJ"@                                                          E }H }H ;L ;L M M %U ] ^ ,m ,m ,m ւ d 6 6 6 6 < ? bS k l l l l &m &m &m Pm Pm Pm Um Wm Wm sm m m ,q Yz Ƃ W ! ! ! : : ݴ 5 \    > d C q q q q q q q q q q q q q q q q q q q q q [ [ [ [ [ [ [ [ [ [ [ [ [ [ [ [ [ [ [ [ [ [ [ [ [ [ [ [ z $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ = = = = = = = = = = = = = = = = = = ] ] ] ] ] ] ] ] ] ] ] ] ] ] ] ] ] ] + + + + + + + + + + + + + + + + + + 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 T" T" T" T" T" T" T" T" T" T" T" T" T" T" T" T" T" T" . . . . . . . . . . . . . . . . . . . . . . . . . . . . c9 m9 n9 t9 9 o` ` z       w ם G  l M$ W$ l$ $ $ $ $ $ "' ,' A' f' ' ' ' ' c* n* * * * * * * - . . C. \. g. p. . 
U1 `1 u1 1 1 1 1 1 3 3 3 l4 4 4 4 4 7 7 7 7 8 8 8 @8 N ]W]W܆]W]W4O]WT]W 67N D N 9*urn:schemas-microsoft-com:office:smarttagsplace=*urn:schemas-microsoft-com:office:smarttags PlaceName=*urn:schemas-microsoft-com:office:smarttags PlaceType8*urn:schemas-microsoft-com:office:smarttagsdate 1319957DayMonthYear{AǓɓ͓̓fgimrs679:?@ȣɣˣ̣Σϣƥǥ ϦЦզ֦ڦۦ"#*+469:fglmvǨȨ˨̨ШѨ٨ڨBCFGMNTUXYabcdmowx !!!!!!!!!!!!!!!!!""%%%%%%7777KK K KKK$K%K/K2K4K5KaMbMeMfMlMmM{M|M}M~MMMMMMMMMNNNNNNNNNNNNOOOOOOPP P PPP P!PPPPPPPPPPPPP RRRRRR&R'R0R1RSSSSSSSSSSTTTTTT T!TXXXX!X"X*X+X5X6X7X8X?X@XCXDXNXOXTXUX^^^__ __8_;_<_````````a a"a#a+a,a2a3a@anavawa}a~aaaaaaaaaa*b7b8brbsbxbyb}b~bbbbbbbbbbbbbbbbb9c:cEcGcOcPcccccd7d>d?dEdFdJdKdVddddddddddCeDeMeqewexeeeeeeeeeeezf{fffffffffffgg g ggIgOgPgZg[gdggggggggh.h6h7h>h?hDhEhHhIhPhQhZh[h`hahJiKiPiQiZiiiiiiiiiiiiijjj j"j,j.j1j2j8j9jCjkjqjrjvjwjjjjjkkkkkkll l llAlElFlllllllllllmmnnnnnnnnnn     #)*.1453478@AIJPQXZ\]cdklsx}~ŵƵʵ˵͵ε#$YZ_`hiop|}\]_bfg()*,23;<DEQRUV   !)*239:ACGHvwz{yz67ABEFKLWXZ[ denopq"#%&5678wy1245@AIKVX]_bclmtuwxXY_`depqrs#$-./0hiuv!"*+0156?@AB#$()2345lmst}~ !+,5678xy$%./01lmvwxy   $%&'cdklopuv)*,-56<=FGHI   !#$!"`abc./89BCDE67;<GJTVYZ`aklno<=FGPQRS@AIJQRWX[\cdmnop  PQYZ[\45;<DEFGxy)*,-1267:;FGHI"#*+4567  RSUV\]^_Z$[$`$a$i$j$p$q$}$~$$$$$$$'-(-*-+-.-/-XDYD\D]DjDkDqDrDxDzDDDDD}P~PPPPPPPPPPPPPPP:;>APQWXk!l!s!t!u!v!~!!!!!!!!<<<<<<<<<<>>>>(>)>+>,>4>5>;><>H>K>O>Q>VGWG[G\G`GaGlGmGvGwGzG{GGGGGQRZ]ab 23:;CDLM]`cdB9C9F9G9O9P9\9]9c9g9j9k9v;w;z;{;;;;;;;;;;;;;ZZZZZZZZZ[[[q\r\x\y\|\}\ccccccccccccddddIeKeSeTeeeeekkkkkkkkkkkkkkkkkkŰư˰̰аѰٰڰ_`bdstz{ #$*+               ----------------LLLL(M)M4M5M=M>MMMMMUUUUWWWW\\\\`4!l)K7Y^6X`;]@x/Q Hj _ "!D!!!!"8"Z""""#N#p###%$G$$$$%k%%%%C&f&&&'5't''''C(f((()4)x))))3*V***+%+i++++/,R,,,,-S-v---:.]...%/H////0K0n000171y1112?2b2222!3c3333'4J4445&5j5556[6~6667<7w777768Y888 909999:[:~:::1;T;;; <-<`<<<<1=T===>*>g>>>>?9?l????*@M@@@@AQAtAAAB9BBBBC2CUCCCCDHDkDDDDENEqEEEF$FSFvFFFG'G}GGG HdHHHHLIoIIIJ1JzJJJ KSKvKKKL%L^LLLLMBMMMM 
NNNqNNN6OYOOOP$PePPPP6QYQQQR'RuRRRR6SYSSSSTsTTTTIUlUUU%VHVVV W0WjWWWW%XHXXXY:YYYYZaZZZZ>[a[[[\7\z\\\\3]V]]]^+^l^^^^I_l___`7`~```a7aZaaaaaObrbbb.cQccc d0ddddd5eXeeee fVfyfff g,gggg hahhhh2iUiiiijXj{jjjk>kkkklNlqlllm%m\mmmmn>n~nnnn"oEooo p-pnpppqwqqqrjrrr sfssss3tVtttu3u~uuu vOvrvvvw?wwwwwx@xxxxykyyyzHzkzzzz {Y{|{{{{{d||||B}d}}}~A~~~~b@bˀ<Wyт7Yă<Lȓ•jؚKhbvZ[wxȧu|1_ʫɬ)}9 : : : : : : : : : : : : ; ; %; -; N Ǔ2fgi679bvȣɣˣϦЦզ֦ڦۦ"#*+4:fglmvǨȨ˨̨ШѨ٨ڨ!BCFGMNTUXYabcdm!!!!!!!!!!!!!!!!P%%%%67JKK K KKK$K%K/K@MaMbMeMfMlMmM{M|M}M~MMMMMMNNNNNNNNNNOOOOOOOPP P PPWPPPPPPPPPPQ RRRRRR&RSSSSSSSSSSSTTTTTWXXXX!X"X*X+X5X6X7X8X?X@XCXDXNX^^^^__ __8_;_<_E_q```````a a"a#a+a,a2a3a@anavawa}a~aaaaaaaaaa*b7b8bAbjbrbsbxbyb}b~bbbbbbbbbbbbbbc9c:cEccccccd7d>d?dEdFdJdKdVdddddddddde7eCeDeMeqewexeeeeeeeeLfzf{fffffffffffgg g ggIgOgPgZg[gdggggggh.h6h7h>h?hDhEhHhIhPhQhZh iJiKiPiQiZiiiiiiiiiiiiijjj j"j,j.j1j2j8j9jCjkjqjrjvjwjjjjjjPkkkkkkkkkll l llAlElFlQl{lllllllllllmmmnnnnnnnnٟ    )*.3478@AIJPQX6cdklsŵƵʵж'YZ_`hiop|}+\]_()*,23;<DEQVz !)*239:AUvwz{_Byz67ABEFKLWh %den}"#%&5CwyY1245@AIKVX]_bclmt'XY_`dep#$-6hiuv!"*+0156?Nl#$()2;lmst}~ !+,5Dxy$%.=lmv   $4cdklopuv)*,-56<=FU   -`p./89BQ 67;<GJTVYZ`akz <=FGP`      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklnopqrstuvwxyz{|}~@AIJQRWX[\cdmPQY45;<DRxy)*,-1267:;FU"#*+4B  RSUV\($Z$[$`$a$i$j$p$q$}$~$$$$,'-(-*-#DXDYD\D]DjDkDqDrDxDHP}P~PPPPPPPPPPPP:;>;!k!l!s!t!u!v!~!!!!!j<<<<<<<<=>>>>(>)>+>,>4>5>;><>H>$GVGWG[G\G`GaGlGmGvGwGzG{GGqQRZv23:;CDLM]| 9B9C9F9G9O9P9\9]9c9U;v;w;z;{;;;;;;;;;;ZZZZZZZZZZ@\q\r\x\eccccccccccddeIeqeevkkkkkkkkkkkkkkkkŰư˰̰аѰٰڰ7_`b c            V--------------[LLL(MaMMtUUWWp\\j}hR̔ ^\jHN%CClj1V>$X*t-/^p<.SQC HlbCn~)FNHGaZU; N%N Q|tb ^7i TG `Kf1 bw2H!6M"*t-E" dzU$ y$@$N%yt% y%$`9&^)L&V;a&N%v'L&'.a' RU)8) _)sd2BN**4Fs* C*X/yA*`(L]+D-?{vQV.(֮FA.  /lE4/ !0O[0xBPZ%0Jfj"U2`v3*eR0R3 93ft3 u3434LP@3fo By;4.P3jx~4(J*4$F5Xk5(}8_[~5Ίڞ =t6vVIk}6Zw7rft8Εp_9ް9. 
"}:YFEj:p%de|: J:zRLC: ;T/S; We;dd*;~"/u;&9;`"Pf->zgi?\r?X^k)?4*l_O?Bƚi C?N%Ni@Gp@,D2^IDXE>D84M8#DF,0kSDS]DjX#D|4PD R;sE PEN%UF"[GN% H_'H6J0+HI0r@IN:^I\#uS)JZLJx 80J8%bKN%_%LF.NkML|uh + iM. zHMޭ̀)"N aNxBP Qmv%/Jh iz!&i&E j` KjΕp1jޭ̀lTn1luplfH_ m2 m=nΕpxn&0Yo4XpR3[tx,n+tjyltޭ̀p:uh1-b=v0JG.][v|ow (?wZ HD xN x  yEyyN%9Cz:@ $nz t7.|8O|´2Ob(L}vt`^}z~D+4}NFT }xi~VP ~GF~(}8_t ~ΕpN~h|Y; @ Appendix @ @ @ @ @ @ @ @ *h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo( hh^h`OJQJo( 0P0^0`POJQJo( ^`OJQJo(o pp^p`OJQJo( @ @ ^@ `OJQJo( ^`OJQJo(o ^`OJQJo( ^`OJQJo( ^`OJQJo(o PP^P`OJQJo( hh^h`OJQJo( hh^h`OJQJo(h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo( hh^h`OJQJo( P^`POJQJo(h ^`OJQJo( pp^p`OJQJo( @ @ ^@ `OJQJo( ^`OJQJo(o ^`OJQJo( ^`OJQJo( ^`OJQJo(o PP^P`OJQJo( hh^h`OJQJo(h ^`OJQJo(h ^`OJQJo(oh   ^ `OJQJo(h \ \ ^\ `OJQJo(h ,,^,`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh ll^l`OJQJo( hh^h`OJQJo( ^`OJQJo(o pp^p`OJQJo( @ @ ^@ `OJQJo( ^`OJQJo(o ^`OJQJo( ^`OJQJo( ^`OJQJo(o PP^P`OJQJo(h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo( hh^h`OJQJo( hh^h`OJQJo(h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`.hTT^T`.h$ L$ ^$ `L.h  ^ `.h^`.hL^`L.hdd^d`.h44^4`.hL^`L.h ^`OJQJo( ^`OJQJo(o pp^p`OJQJo( @ @ ^@ `OJQJo( ^`OJQJo(o ^`OJQJo( ^`OJQJo( ^`OJQJo(o PP^P`OJQJo(h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo( hh^h`OJQJo(@h^`. 
rr^r`OJQJo(h^`.h^`.hpLp^p`L.h@ @ ^@ `.h^`.hL^`L.h^`.h^`.hPLP^P`L.h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h ^`OJQJo( ^`OJQJo(o pp^p`OJQJo( @ @ ^@ `OJQJo( ^`OJQJo(o ^`OJQJo( ^`OJQJo( ^`OJQJo(o PP^P`OJQJo(hh^h`56CJOJQJo([]h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo( hh^h`OJQJo(h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h TT^T`OJQJo(h $ $ ^$ `OJQJo(oh   ^ `OJQJo(h ^`OJQJo(h ^`OJQJo(oh dd^d`OJQJo(h 44^4`OJQJo(h ^`OJQJo(oh ^`OJQJo( hh^h`56CJ3. ^`CJOJQJo( ^`OJQJo(o pp^p`OJQJo( @ @ ^@ `OJQJo( ^`OJQJo(o ^`OJQJo( ^`OJQJo( ^`OJQJo(o PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(@h^`.h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h 
^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo( z^`zOJQJo(h ^`OJQJo(vh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(hh^h`56CJOJQJo([]^`.pLp^p`L.@ @ ^@ `.^`.L^`L.^`.^`.PLP^P`L.h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(vh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`.h 0P0^0`POJQJo(h L ^ `L.h\ \ ^\ `.h,,^,`.hL^`L.h^`.h^`.hlLl^l`L.@h^`.h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(@h^`B.h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(@h^`.h^`.h^`.h L ^ `L.h\ \ ^\ `.h,,^,`.hL^`L.h^`.h^`.hlLl^l`L.h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo( hh^h`OJQJo( hh^h`OJQJo(h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(@h^`. hh^h`OJQJo(h ^`OJQJo(vh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(XX^X`o(.^`.pLp^p`L.@ @ ^@ `.^`.L^`L.^`.^`.PLP^P`L.h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(@h^`.h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(@h^`A. 
hh^h`OJQJo(hh^h`56CJOJQJo([]h TT^T`OJQJo(oh $ $ ^$ `OJQJo(h   ^ `OJQJo(h ^`OJQJo(h ^`OJQJo(oh dd^d`OJQJo(h 44^4`OJQJo(h ^`OJQJo(oh ^`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(h pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h ^`OJQJo(v ^`OJQJo(o pp^p`OJQJo( @ @ ^@ `OJQJo( ^`OJQJo(o ^`OJQJo( ^`OJQJo( ^`OJQJo(o PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(^`o(.^`.pLp^p`L.@ @ ^@ `.^`.L^`L.^`.^`.PLP^P`L. hh^h`OJQJo(h ^`OJQJo(h ^`OJQJo(oh   ^ `OJQJo(h \ \ ^\ `OJQJo(h ,,^,`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh ll^l`OJQJo(hh^h`56CJOJQJo([]h ^`OJQJo(vh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo( >^`>OJQJo(h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(@h^`.^`o(.0^`0o(..0^`0o(... 0^`0o( .... TT^T`o( ..... TT^T`o( ...... `^``o(....... 
`^``o(........h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo( hh^h`OJQJo(h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h ^`OJQJo(h ^`OJQJo(oh   ^ `OJQJo(h \ \ ^\ `OJQJo(h ,,^,`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh ll^l`OJQJo(h ^`OJQJo(vh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(hh^h`56CJOJQJo([]h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(hh^h`56CJOJQJo([]h88^8`CJOJQJo(h ^`OJQJo(oh   ^ `OJQJo(h   ^ `OJQJo(h xx^x`OJQJo(oh HH^H`OJQJo(h ^`OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(^`B*OJQJo(ph ^`OJQJo(o pp^p`OJQJo( @ @ ^@ `OJQJo( ^`OJQJo(o ^`OJQJo( ^`OJQJo( ^`OJQJo(o PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h ^`OJQJo( ^`OJQJo(o pp^p`OJQJo( @ @ ^@ `OJQJo( ^`OJQJo(o ^`OJQJo( ^`OJQJo( ^`OJQJo(o PP^P`OJQJo(h ^`OJQJo(h ^`OJQJo(oh   ^ `OJQJo(h \ \ ^\ `OJQJo(h ,,^,`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh ll^l`OJQJo(h^`B*OJQJo(phh ^`OJQJo(h pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo( z^`zOJQJo( hh^h`OJQJo(h ^`OJQJo(vh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(vh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo( 
hh^h`OJQJo(hh^h`56CJOJQJo([]h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h ^`OJQJo(vh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`.h^`.hpLp^p`L.h@ @ ^@ `.h^`.hL^`L.h^`.h^`.hPLP^P`L.hh^h`56CJOJQJo([]^`56CJOJQJo([]pLp^p`L.@ @ ^@ `.^`.L^`L.^`.^`.PLP^P`L.h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h ^`OJQJo(oh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(@h^`.h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(@h^`) M^`MOJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(hh^h`56CJOJQJo([]h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`.h^`.hpLp^p`L.h@ @ ^@ `.h^`.hL^`L.h^`.h^`.hPLP^P`L.h ^`OJQJo(h ^`OJQJo(oh ^`OJQJo(h   ^ `OJQJo(h PP^P`OJQJo(oh   ^ `OJQJo(h ^`OJQJo(h ^`OJQJo(oh ^`OJQJo(@h H^H`56CJ. h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`.hTT^T`.h$ L$ ^$ `L.h  ^ `.h^`.hL^`L.hdd^d`.h44^4`.hL^`L.@h^`A.  h^h`56CJ5.3. @h^`. 
M^`MOJQJo(@h^`.h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(h pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`.h ^`OJQJo(h L ^ `L.h\ \ ^\ `.h,,^,`.hL^`L.h^`.h^`.hlLl^l`L.^`B*OJQJo(ph ^`OJQJo(o pp^p`OJQJo( @ @ ^@ `OJQJo( ^`OJQJo(o ^`OJQJo( ^`OJQJo( ^`OJQJo(o PP^P`OJQJo(hh^h`56CJOJQJo([]@h^`.h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`.h^`.hpLp^p`L.h@ @ ^@ `.h^`.hL^`L.h^`.h^`.hPLP^P`L.0^`0o(0^`0o(.0^`0o(..0^`0o(... 88^8`o( .... 88^8`o( ..... `^``o( ...... `^``o(....... ^`o(........h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo( hh^h`OJQJo(hh^h`56CJOJQJo([]h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h ^`OJQJo(h pp^p`OJQJo(oh @ @ ^@ `OJQJo(h ^`OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h PP^P`OJQJo(oh   ^ `OJQJo(h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h ^`OJQJo( ^`OJQJo(o pp^p`OJQJo( @ @ ^@ `OJQJo( ^`OJQJo(o ^`OJQJo( ^`OJQJo( ^`OJQJo(o PP^P`OJQJo( ^` o( ^` o(.0^`0o(..0^`0o(... 
88^8`o( .... 88^8`o( ..... `^``o( ...... `^``o(....... ^`o(........h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo( \^`\56CJ5.1. h ^`OJQJo(oh ^`OJQJo(o pp^p`OJQJo( @ @ ^@ `OJQJo( ^`OJQJo(o ^`OJQJo( ^`OJQJo( ^`OJQJo(o PP^P`OJQJo(h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo( hh^h`OJQJo(hh^h`56CJOJQJo([]^`.pLp^p`L.@ @ ^@ `.^`.L^`L.^`.^`.PLP^P`L.h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(hh^h`o(.^`.pLp^p`L.@ @ ^@ `.^`.L^`L.^`.^`.PLP^P`L.h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(0^`0o(0^`0o(.0^`0o(..0^`0o(... 88^8`o( .... 88^8`o( ..... `^``o( ...... `^``o(....... ^`o(........h ^`OJQJo(h ^`OJQJo(oh   ^ `OJQJo(h \ \ ^\ `OJQJo(h ,,^,`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh ll^l`OJQJo( hh^h`OJQJo(h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(@h^`.h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(@h h^h`56CJ.  hh^h`OJQJo(@h^`. 0P0^0`POJQJo( hh^h`OJQJo( pp^p`OJQJo( @ @ ^@ `OJQJo( ^`OJQJo(o ^`OJQJo( ^`OJQJo( ^`OJQJo(o PP^P`OJQJo(@h8^8`. 
hh^h`OJQJo(h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo( M^`MOJQJo(h^`.h^`.hpLp^p`L.h@ @ ^@ `.h^`.hL^`L.h^`.h^`.hPLP^P`L.h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo( z^`zOJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo( hh^h`OJQJo(z^`z5OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(@h^`.h^`CJOJQJo(h^`B*OJQJo(phh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(@h +^+`56CJ() h^`.h^`.hpLp^p`L.h@ @ ^@ `.h^`.hL^`L.h^`.h^`.hPLP^P`L.h^`CJOJQJo(h ^`OJQJo(h pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo( hh^h`OJQJo( hh^h`OJQJo(h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(^`)^`.pLp^p`L.@ @ ^@ `.^`.L^`L.^`.^`.PLP^P`L.h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h ^`OJQJo( ^`OJQJo(o pp^p`OJQJo( @ @ ^@ `OJQJo( ^`OJQJo(o ^`OJQJo( ^`OJQJo( ^`OJQJo(o PP^P`OJQJo(h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(88^8`o(.pp^p`.@ L@ ^@ `L.^`.^`.L^`L.^`.PP^P`. L ^ `L.  h^h`56CJ5.3. @h h^h`56CJ2.  
z^`zOJQJo(h ^`OJQJo( ^`OJQJo(o pp^p`OJQJo( @ @ ^@ `OJQJo( ^`OJQJo(o ^`OJQJo( ^`OJQJo( ^`OJQJo(o PP^P`OJQJo(h ^`OJQJo(h^`B*OJQJo(phh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h2miMicrosoft Word 10.0@@Zi@zM@zMu X   FMicrosoft Word-document MSWordDocWord.Document.89q  NASA90??3 O NASA Software Safety Guidebook Titel\  8@v/sitemap.htmZ )http://llis.nasa.gov/llis/llis/main.html[ &http://standards.nasa.gov/sitemap.htmgm $http://www.sohar.com/J1030/appb.htm:, ;http://www.esrin.esa.it/tidc/Press/Press96/ariane5rep.html&6 ;http://www.stsc.hill.af.mil/crosstalk/1998/apr/simplex.asp 1http://www-energy.llnl.gov/FESSP/CSRC/122246.pdf^ %http://www.fda.gov/cdrh/ode/1252.pdftz <http://www.stsc.hill.af.mil/crosstalk/2000/jan/fischman.asp0y 'http://arti.vub.ac.be/~cyrano/AUTOSYS/ 6http://www.devicelink.com/mddi/archive/99/01/013.htmlBW .http://www.embedded.com/1999/9911/9911ia2.htm!' 
0http://www.embedded.com/2000/0011/0011feat5.htmzj} 3http://www.sciam.com/0697issue/0697villasenor.html^z -http://www.embedded.com/1999/9906/9906sr.htmEw "http://www.optimagic.com/faq.html1st Hhttp://www.sei.cmu.edu/publications/documents/90.reports/90.tr.011.html Nq >http://www.isdmag.com/design/embeddedtools/embeddedtools.html,'n 0http://www.embedded.com/1999/9911/9911feat2.htm.&k 0http://www.embedded.com/1999/9910/9910feat1.htmt"h 'http://www.embedded.com/98/9803fe3.htm).e 0http://www.embedded.com/2000/0009/0009feat4.htmb .http://www.embedded.com/98/9812/9812feat2.htm)+_ Dhttp://www.serc.net/TechReports/abstracts/catagory/Reliability.html Z\ *http://www.esconli՜.+,D՜.+,` px  NASA90??3 O NASA Software Safety Guidebook Titel\  8@ _PID_HLINKSA gm+ $http://www.sohar.com/J1030/appb.htmgm( $http://www.sohar.com/J1030/appb.htmgm% $http://www.sohar.com/J1030/appb.htmgm" $http://www.sohar.com/J1030/appb.htmqm http://www.amleth.demon.co.uk/% mailto:PEB@amleth.demon.co.ukCK _5._SOFTWARE_SAFETY_1$ _C.4.8_Example_formsVI  _APPENDIX_Bo: http://www.ganssle.com/VC http://www.embedded.com/Z 1http://www.bmpcoe.org/guideline/books/index.htmlXD 6http://www.enteract.com/~bradapp/links/swe-links.html}D 7http://www.cc.gatech.edu/computing/SW_Eng/hotlist.htmlK  =http://www.caip.rutgers.edu/~marsic/Teaching/ISE-online.html  >http://www.qucis.queensu.ca/Software-Engineering/reading.html!d http://manta.cs.vt.edu/ase/1 ,http://www.pogner.demon.co.uk/mil_498/6.htm'|  http://www.construx.com/doc.htmFI 2http://www.construx.com/survivalguide/chapter.htm'e 'http://www.testingcraft.com/index.html> 8http://www.testworks.com/Institute/HotList/index.9.html1# http://www.softwareqatest.com/ *http://www.testingstuff.com/testing2.htmlm< +http://www.fda.gov/cdrh/comp/swareval.html  http://www.io.com/~wazmo/qa/n %http://www.ssq.org/welcome_main.htmlUQ =http://www.cera2.com/WebID/realtime/safety/blank/org/a-z.htm!1 /http://hissa.ncsl.nist.gov/publications/sp223/*o 
+http://archive.comlab.ox.ac.uk/safety.htmlDH %http://sunnyday.mit.edu/safety-club/4* (http://www.cmpcmm.com/cc/standards.html W -http://www.methods-tools.com/html/tools.html34 /http://www.totalmetrics.com/resource/links.htm&! http://www.swebok.org/mj $http://swg.jpl.nasa.gov/index.shtmlG http://www.ivv.nasa.gov/}j (http://satc.gsfc.nasa.gov/homepage.html  http://sel.gsfc.nasa.gov/e} +http://atb-www.larc.nasa.gov/fm/index.htmlO 9http://iss-www.jsc.nasa.gov:1532/palsagnt/plsql/palshomeJ :http://www.hq.nasa.gov/office/hqlibrary/books/nasadoc.htm  http://nodis.hq.nasa.gov/[ &http://standards.nasa.gone.com/98fallpapers.htmY _7.2.2_Monitoring_ContractorlV _3.2.3.3_Tailoring_theCKS _5._SOFTWARE_SAFETY_2FKP _4._SAFETY_CRITICAL_1M _7.1.2.4_Dealing_with*VJ _7.1.2.3_Adding_new&^G _7.1.4_Who_Tests 0D _E.1_Checklist_forCKA _5._SOFTWARE_SAFETY_2 0> _E.1_Checklist_for=; _6.10__Operating(8 #_7.2_Contractor-developed_Software~5 _7.1_Off-the-Shelf_Softwarev2 _4.2.3_Formal_Methods,/ _6.14.3.2_Testing_and g, _4.2.4__Model$0) http://www.its.dot.gov/) & _6.14.3_Case_Study # !http://www.pantos.org/35317.htmlQ &http://solo.dc3.com/white/wsperf.html$p $http://www.dbmsmag.com/9707i03.html<# Hhttp://www.soft.com/eValid/Technology/White.Papers/website.testing.html:1 ?http://msdn.microsoft.com/workshop/author/script/weberrors.aspEf _6.11_Distributed_Computing http://vic.lerc.nasa.gov/_ http://www.microsoft.com/_ http://www.microsoft.com/*o http://www.embedded-linux.org/Y http://www.linux.org/A http://www.rtems.com/mc 'http://www.mentor.com/embedded/vrtxos/OX 2http://www.lynuxworks.com/products/whatislos.htmlA http://www.kadak.com/0= 4http://www.microware.com/Products/Software/OS9.html|> http://www.cmx.com/, http://www.qnx.com/3* 7http://www.windriver.com/products/html/psosystem3.htmlmc 'http://www.mentor.com/embedded/vrtxos/QE http://www.enea.com/J http://www.windriver.com/FF )http://www.ddci.com/products/SCORoot.htm&x http://wombat.doc.ic.ac.uk/q! 
bhttp://www.cs.hmc.edu/tech_docs/qref/rational/DevelopmentStudioUNIX.1.1/docs/html/rup_ada/ada.htm1# http://www.softwareqatest.com/]) _4.8_Software_Operations1# http://www.softwareqatest.com/g& http://www.sqatest.com/  http://www.io.com/~wazmo/qa/&< ;http://www.chillarege.com/authwork/TestingBestPractice.pdf>B _4.6__Software  2http://www.cse.cuhk.edu.hk/~lyu/book/reliability/q Ghttp://satc.gsfc.nasa.gov/suport/software_metrics_and_reliability.htmlxy <http://www.stsc.hill.af.mil/crossTalk/1995/feb/Reliable.aspLL 1http://techreports.jpl.nasa.gov/1993/93-1886.pdf2; 'http://www.cs.colostate.edu/~cs630/rh/=i /http://www.icaen.uiowa.edu/~ankusiak/reli.html/, http://www.meadep.com/j .http://rac.iitri.org/DATA/RMST/rel_model.html ` _4.2.6_Formal_Inspections>B _4.6__Software _E.4_Checklist_of_1 ` _4.2.6_Formal_Inspectionso _5.4.9__Finaln _5.3.4_Design_Constraintl _5.4.5_Update_Design%  _APPENDIX_C_SoftwareVI  _APPENDIX_Dq _4.2.4_Formal_MethodsP: _4.2.3.1_Object_Oriented _6._Programming_Languages_1R1 _5.3.8.1_Function_Points] http://www.ifpug.org/ p _5.1.6_Software_Fault(~ _5.1.5_Timing,_Throughputl{ _3.2.3.3_Tailoring_the `x _5.1.4_Formal_Inspections!Iu _4.2.2.3__Hazardous `r _4.2.6_Formal_Inspectionss?o table22ql _5.2.1_Update_Criticalityy i  _5.1.2_Requirements_CriticalityP6f _5.1_Software_Safety-Xc _2.5_Software_Subsysteml` _2.3_Preliminary_Hazardy ]  _5.1.2_Requirements_CriticalityP6Z _5.1_Software_Safety3GW _4.2.1_Development_ofdT _5.2_Architectural_DesigndQ _4.3_Architectural_Design$ N _APPENDIX_B_Softwares9K table42/[H _4.2_Software_Requirements `E _4.2.6_Formal_InspectionsZ/B _2.3.1.2_Risk_Levelsy ?  _5.1.2_Requirements_Criticality!H< _4.2.2.2__Hazardous c9 _4.2.5_Formal_Inspections7E6 _5.1.2.1_Critical_Software `3 _4.2.6_Formal_Inspections3G0 _4.2.1_Development_ofq- _4.2.4_Formal_Methods$)* <http://tide.it.bond.edu.au/inft390/002/Resources/sysreq.htmM*' _4.2.1.1_Safety_RequirementsD0$ _5.6_Operations_&/Z! 
_5.5_Test_Analysis4[ _5.4_Code_AnalysisR' _5.3_Detailed_Designd _5.2_Architectural_DesignP6 _5.1_Software_Safetyh _3.3_Incorporating_Software% _5.4.11_Program_Slicingv| <http://www.stsc.hill.af.mil/crosstalk/1995/apr/testinoo.aspTH %http://www.rbsc.com/pages/myths.htmlO +http://www.cetus-links.org/oo_testing.html &http://www.rbsc.com/pages/ootbib.html4 @http://www.cigital.com/presentations/testing_objects/sld001.htms _4.6.4_System_Testing1# http://www.softwareqatest.com/~ _4.6.7_Software_Safetyd] _6.15_Good_Programming3G _4.2.1_Development_of#W _5.1.3_Specification_Analysis _6._Programming_Languages_1~ _7.1_Off-the-Shelf_Softwares9 table42UY *http://www.rationalrose.com/modelchecker/ 4 'http://www.time-rover.com/TRindex.html}8 >http://www.cadence.com/eda_solutions/flv_fveimc_l3_index.htmlz7 'http://www-cad.eecs.berkeley.edu/~vis/N  -http://www.cs.cmu.edu/~modelcheck/verus.htmlB  2http://www.docs.uu.se/docs/rtmv/uppaal/index.html 0 http://rodin.stanford.edu/<x http://tvs.twi.tudelft.nl/a| /http://www.cis.upenn.edu/~lee/inhye/treat.html)> ,http://sprout.stanford.edu/dill/murphi.html http://www.brics.dk/mona/`y @http://www-verimag.imag.fr//TEMPORISE/kronos/index-english.htmlK .http://www-cad.eecs.berkeley.edu/~tah/HyTech/<t +http://www.cs.cmu.edu/~modelcheck/smv.html{q 7http://netlib.bell-labs.com/netlib/spin/whatispin.html,o 0http://www.math.hmc.edu/~jpl/modelcheckers.html!  0http://www.abo.fi/~johan.lilius/mc/mclinks.html( _5.1.5_Timing,_Throughput`E _Glossary_of_Termsd _4.3_Architectural_Designb_ _E.2_Generic_Software~ _5.1.1_Software_Safety{ _4.2.2__Genericl _2.3_Preliminary_Hazard} _5.5.4_Checklists_ofJ> _5.5.3_Reliability_ModelingV.  
_5.5.6_Independent_Verification|  _5.5.5_Test_Results} _5.5.4_Checklists_ofJ> _5.5.3_Reliability_Modeling b _5.5.2_Formal_Inspectionsg _5.5.1_Test_Coverage&^ _7.1.4_Who_Tests~ _4.6.7_Software_Safetyw| _4.6.5__Softwaresy _4.6.4_System_TestingB$v _4.6.3_Integration_Testingvs _4.5.4_Unit_Level- p _5.4.12_Update_Software%m _5.4.11_Program_Slicing8 j _5.4.10__Final'Fg _5.4.9_Interrupt_Analysis~ d _5.4.8_Unused_Codega _5.4.7_Applying_Formali^ _5.4.6_Formal_Codel[ _5.4.5_Update_Design hX _5.4.4_Update_MeasurementdU _5.4.3_Code_InterfacekR _5.4.2_Code_DatanO _5.4.1_Code_Logicl$L _4.5.3_Refactoring-]I _4.5.2_Defensive_ProgrammingaF _4.5.1_Coding_Checklists ,C _5.3.16_Updates_toZh@ _5.3.15_Software_Failure;= _5.3.14_Formal_Inspections- : _5.3.13_Requirements_State.7 _5.3.12_Formal_MethodszJ4 _5.3.11_Selection_of 61 _5.3.10_Measurement_ofw. _5.3.9_Markov_Modeling7]+ _5.3.8_Dynamic_Flowgraphn( _5.3.7_Rate_Monotonicz% _5.3.6_Software_Elementa" _5.3.5_Design_Functionaln _5.3.4_Design_Constraintl _5.3.3_Design_Interfacer _5.3.2_Design_Datai _5.3.1_Design_Logic f _4.2.5__Modelu _5.2.8__Formal ` _5.2.7_Formal_Inspectionsd _5.2.6_Update_Softwareo _5.2.5_Update_Timing,;_ _5.2.4.2__Independencew "_5.2.4.1_Interdependence_Analysis>L_5.2.3_Analyze_Architectural7Y_5.2.2_Conduct_Hazardq_5.2.1_Update_Criticalitye_4.3.5_Coding_StandardsD _4.3.3_Selection_of~_7.1_Off-the-Shelf_SoftwareD!_4.3.2_Selection_of p_5.1.6_Software_Fault(_5.1.5_Timing,_Throughput `_4.2.6_Formal_Inspectionsq_4.2.4_Formal_Methods#W_5.1.3_Specification_Analysis{_4.2.2__Genericy  _5.1.2_Requirements_Criticality(E_5.1.1.1_Checklists_and~_5.1.1_Software_Safetyl_2.3_Preliminary_HazardVI _APPENDIX_EB  table313s>table37s>table35B  table313s>table37CK_5._SOFTWARE_SAFETY_2FK_4._SAFETY_CRITICAL_1CK_5._SOFTWARE_SAFETY_1FK_4._SAFETY_CRITICAL_1~_5.1.1_Software_SafetyS_7._SOFTWARE_ACQUISITION_3s>table31l_3.2.3.3_Tailoring_thes>table33s>table35@K 
figure32s>table34s?table23Z/_2.3.1.2_Risk_LevelsZ(_2.4.1.2_Risk_LevelsJ>_5.5.3_Reliability_Modeling| _5.5.5_Test_Resultsg_5.5.1_Test_Coveragej_4.6.6_Test_Witnessing~}_4.6.7_Software_Safety xz_4.6.6_Regression_Testing1Ww_4.6.1_Testing_Techniques'Ft_5.4.9_Interrupt_Analysis~ q_5.4.8_Unused_Codein_5.4.6_Formal_Codedk_5.4.3_Code_Interfacekh_5.4.2_Code_Datane_5.4.1_Code_Logic\hb_5.3.13_Software_Failure/ __5.3.11_Requirements_State7S\_5.3.6_Dynamic_FlowgraphnY_5.3.4_Design_ConstraintlV_5.3.3_Design_InterfacerS_5.3.2_Design_DataiP_5.3.1_Design_LogicuM_5.2.8__Formal;_J_5.2.4.2__IndependencewG"_5.2.4.1_Interdependence_Analysis>LD_5.2.3_Analyze_Architectural7YA_5.2.2_Conduct_Hazardq>_5.2.1_Update_Criticality `;_5.1.4_Formal_Inspections(8_5.1.5_Timing,_Throughput p5_5.1.6_Software_Fault#W2_5.1.3_Specification_Analysisy / _5.1.2_Requirements_CriticalityP6,_5.1_Software_Safety3G)_4.2.1_Development_ofM*&_4.2.1.1_Safety_RequirementsT<#_2.3.1_PHA_Approachl _2.3_Preliminary_Hazards>table31b_3.2_Scope_ofZ/_2.3.1.2_Risk_LevelsS_7._SOFTWARE_ACQUISITION_3_6._Programming_Languages_1CK_5._SOFTWARE_SAFETY_1FK _4._SAFETY_CRITICAL_1CM_3._SOFTWARE_SAFETY_2CM_3._SOFTWARE_SAFETY_2s?table23y  _5.1.2_Requirements_Criticality^cOhttp://books.usapa.belvoir.army.mil/cgi-bin/bookmgr/BOOKS/P385_16/FIGFIGUNIQ10AK figure22n! 
http://wwwsrqa.jsc.nasa.gov/pceAK figure22a_2.3.1.1_Identifying_HazardsCM_3._SOFTWARE_SAFETY_2s?table23s?table22mi_2.4.3_Tools_and_1l_2.3_Preliminary_Hazard `_4.2.6_Formal_Inspectionss?table21s>table31VI _APPENDIX_EVI _APPENDIX_D5_APPENDIX_C_1$ _APPENDIX_B_SoftwareVI _APPENDIX_A_8._REFERENCESS_7._SOFTWARE_ACQUISITION_3_6._Programming_Languages_1CK_5._SOFTWARE_SAFETY_1FK_4._SAFETY_CRITICAL_1CM_3._SOFTWARE_SAFETY_2:_2._SOFTWARE_SAFETY_7._REFERENCESS_7._SOFTWARE_ACQUISITION_3VI _APPENDIX_A6_Toc5097339756_Toc5097339746_Toc5097339736_Toc5097339726_Toc5097339716_Toc5097339706|_Toc5097339696v_Toc5097339686p_Toc5097339676j_Toc5097339666d_Toc5097339656^_Toc5097339646X_Toc5097339636R_Toc5097339626L_Toc5097339616F_Toc5097339606@_Toc5097339596:_Toc50973395864_Toc5097339576._Toc5097339566%_Toc5097339176_Toc5097339166_Toc5097339156_Toc5097339146 _Toc5097339136_Toc5097339126_Toc5097339116_Toc5097339106_Toc5097339096_Toc5097339080_Toc5109376230_Toc5109376220_Toc5109376210_Toc5109376200_Toc5109376190_Toc5109376180_Toc5109376170_Toc5109376160_Toc5109376150_Toc5109376140_Toc5109376130_Toc5109376120_Toc5109376110_Toc5109376100_Toc5109376090_Toc5109376080_Toc5109376070_Toc5109376060z_Toc5109376050t_Toc5109376040n_Toc5109376030h_Toc5109376020b_Toc5109376010\_Toc5109376003V_Toc5109375993P_Toc5109375983J_Toc5109375973D_Toc5109375963>_Toc51093759538_Toc51093759432_Toc5109375933,_Toc5109375923&_Toc5109375913 
_Toc5109375903_Toc5109375893_Toc5109375883_Toc5109375873_Toc5109375863_Toc5109375853_Toc5109375843_Toc5109375833_Toc5109375823_Toc5109375813_Toc5109375803_Toc5109375793_Toc5109375783_Toc5109375773_Toc5109375763_Toc5109375753_Toc5109375743_Toc5109375733_Toc5109375723_Toc5109375713_Toc5109375703_Toc5109375693_Toc5109375683_Toc5109375673_Toc5109375663_Toc5109375653_Toc5109375643~_Toc5109375633x_Toc5109375623r_Toc5109375613l_Toc5109375603f_Toc5109375593`_Toc5109375583Z_Toc5109375573T_Toc5109375563N_Toc5109375553H_Toc5109375543B_Toc5109375533<_Toc51093755236_Toc51093755130_Toc5109375503*_Toc5109375493$_Toc5109375483_Toc5109375473_Toc5109375463_Toc5109375453 _Toc5109375443_Toc5109375433_Toc5109375423_Toc5109375413_Toc5109375403_Toc5109375393_Toc5109375383_Toc5109375373_Toc5109375363_Toc5109375353_Toc5109375343_Toc5109375333_Toc5109375323_Toc5109375313_Toc5109375303_Toc5109375293_Toc5109375283_Toc5109375273_Toc5109375263_Toc5109375253_Toc5109375243_Toc5109375233_Toc5109375223_Toc5109375213|_Toc5109375203v_Toc5109375193p_Toc5109375183j_Toc5109375173d_Toc5109375163^_Toc5109375153X_Toc5109375143R_Toc5109375133L_Toc5109375123F_Toc5109375113@_Toc5109375103:_Toc51093750934_Toc5109375083._Toc5109375073(_Toc5109375063"_Toc5109375053_Toc5109375043_Toc5109375033_Toc5109375023 _Toc5109375013_Toc5109375002_Toc5109374992_Toc5109374982_Toc5109374972_Toc5109374962_Toc5109374952_Toc5109374942_Toc5109374932_Toc5109374922_Toc5109374912_Toc5109374902_Toc5109374892_Toc5109374882_Toc5109374872_Toc5109374862_Toc5109374852_Toc5109374842_Toc5109374832_Toc5109374822_Toc5109374812_Toc5109374802_Toc5109374792_Toc5109374782z_Toc5109374772t_Toc5109374762n      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcde_Toc5109374752h_Toc5109374742b_Toc5109374732\_Toc5109374722V_Toc5109374712P_Toc5109374702J_Toc5109374692D_Toc5109374682>_Toc51093746728_Toc51093746622_Toc5109374652,_Toc5109374642&_Toc5109374632 
_Toc5109374622_Toc5109374612_Toc5109374602_Toc5109374592_Toc5109374582_Toc5109374572_Toc5109374562_Toc5109374552_Toc5109374542_Toc5109374532_Toc5109374522_Toc5109374512_Toc5109374502_Toc5109374492_Toc5109374482_Toc5109374472_Toc5109374462_Toc5109374452_Toc5109374442_Toc5109374432_Toc5109374422_Toc5109374412_Toc5109374402_Toc5109374392_Toc5109374382_Toc5109374372_Toc5109374362~_Toc5109374352x_Toc5109374342r_Toc5109374332l_Toc5109374322f_Toc5109374312`_Toc5109374302Z_Toc5109374292T_Toc5109374282N_Toc5109374272H_Toc5109374262B_Toc5109374252<_Toc51093742426_Toc51093742320_Toc5109374222*_Toc5109374212$_Toc5109374202_Toc5109374192_Toc5109374182_Toc5109374172 _Toc5109374162_Toc5109374152_Toc5109374142_Toc5109374132_Toc5109374122_Toc5109374112_Toc5109374102_Toc5109374092_Toc5109374082_Toc5109374072_Toc5109374062_Toc5109374052_Toc5109374042_Toc5109374032_Toc5109374022_Toc5109374012_Toc5109374005_Toc5109373995_Toc5109373985_Toc5109373975_Toc5109373965_Toc5109373955_Toc5109373945_Toc5109373935|_Toc5109373925v_Toc5109373915p_Toc5109373905j_Toc5109373895d_Toc5109373885^_Toc5109373875X_Toc5109373865R_Toc5109373855L_Toc5109373845F_Toc5109373835@_Toc5109373825:_Toc51093738154_Toc5109373805._Toc5109373795(_Toc5109373785"_Toc5109373775_Toc5109373765_Toc5109373755_Toc5109373745 _Toc5109373735_Toc5109373725_Toc5109373715_Toc5109373705_Toc5109373695_Toc5109373685_Toc5109373675_Toc5109373665_Toc5109373655_Toc5109373645_Toc5109373635_Toc5109373625_Toc5109373615_Toc5109373605_Toc5109373595_Toc5109373585_Toc5109373575_Toc5109373565_Toc5109373555_Toc5109373545_Toc5109373535_Toc5109373525_Toc5109373515_Toc5109373505z_Toc5109373495t_Toc5109373485n_Toc5109373475h_Toc5109373465b_Toc5109373455\_Toc5109373445V_Toc5109373435P_Toc5109373425J_Toc5109373415D_Toc5109373405>_Toc51093733958_Toc51093733852_Toc5109373375,_Toc5109373365&_Toc5109373355 _Toc5109373345_Toc5109373335_Toc5109373325_Toc5109373315_Toc5109373305_Toc510937329 ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ 
^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo( ^`OJQJo(o ^`OJQJo(o pp^p`OJQJo( @ @ ^@ `OJQJo( ^`OJQJo(o ^`OJQJo( ^`OJQJo( ^`OJQJo(o PP^P`OJQJo(@h^`) hh^h`OJQJo(@h^`B.h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo( hh^h`OJQJo(h^`B*OJQJo(phh ^`OJQJo(h pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(@h^`. >^`>OJQJo( hh^h`OJQJo(hh^h`56CJOJQJo([]h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(oh @ @ ^@ `OJQJo(h ^`OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h PP^P`OJQJo(oh   ^ `OJQJo(h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo( z^`zOJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h^`B*OJQJo(phh ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(h ^`OJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(^`B*OJQJo(ph ^`OJQJo(o pp^p`OJQJo( @ @ ^@ `OJQJo( ^`OJQJo(o ^`OJQJo( ^`OJQJo( ^`OJQJo(o PP^P`OJQJo(h^`CJOJQJo(h ^`OJQJo(oh pp^p`OJQJo(h @ @ ^@ `OJQJo(h ^`OJQJo(oh ^`OJQJo(h ^`OJQJo(h ^`OJQJo(oh PP^P`OJQJo(hh^h`.Eyy[[4PDi '(?wf1 $X [Ga&H0R3%bKh9*ZqJbvGM\PE.][vDn+9oU1lsEt7.|DRU)80JBy;4O[0aN#C*4We;[ndY;uS)JGaH!9& /llw#\)GX0I+ iMDMo3OY_93WAM nY]S5#S)Z!0p:u*;-b=v9EU; Z+4}~|tb 9k}6 ;I*O?!&iS]DD0#DF@ID xvQV./CLJ>$XiWv M"1j{Vsltm=n\SKj!ft8m'LAyA*:^IC:hUPT_%L0 Wn+tf->~Egm# VC_'HL]+O|i~C*M8#D;\r?G [3PA_)w7-N gi?bC\> HOX =t64cx/u;`^}5%]F5;2RL&Be`"<,k t/lN~ XTP@3`2Fs*93y$g"]+HkML k]379CzUF"U29tAT }Ej:2]Z/;h]QCEh_ mqz_/S;yt%g?m ]3Yt3 x?2/e|:W[>JhQ#h$nz/;[_7E"dzU$a'owS` ``)"NE4/9GPH *gRo 
dtbnNi@=U`BN**G1^Xp[~5y%0YoHu3; Wjx~4<.\cF?RZ%0}:v'l)Fk!R[tk)?J: yjFA. @h ^`OJQJo( @h T^T`OJQJo(*x @h h^h`OJQJo(@7&YR+Le>fS7Gnɧ&@WXlݩީI{|1_ʫ:;S|ɬ)SmnEFOX jkt}67@Ie        * V ` u v x  X  ]  _ /|O2_anwxy%&'I U V _ h s t   )!*!!0"@"Y"Z"["b"k"t"}"""""""""""""""""""""""""""""""&&.&S&T&V&r&s&u&z&{&}&&&&&&&&&1CCCCCCCCCCCCCC DD(D)D*D+D,D-D?DSDTDUDzDDDDDDDDDDDDDDDEE E E,E-E.E/ELEMENEOEPEQERESETEUEVEcExEEEEEEEEEEEEEEEEEp]]]]]]]S^^_``2`m`bFcGctcceeefLf[hhhhijkk#kPkmOmPmqmmmmmn n.nQnRnqrrrr;sk͡Ρ;=?ACD]_acefǣ/02~֤פ٤x߬ #$?DIRZ[ĭ֭ͭޭ#+38=>0BVWXZvwy{!:;?ClmgVWmnpz{~./048=>X[^aboqsvw68:<=%/CDEIMRSuxz|} !qsvxy.0356@BEGH  8BVWX\`ef358:;68;=>/1367wy{~%'*,-GIKMN!$')*,V`tuvz~"$&'acfijCEHJKloqst QSVXYnpruvw  Z\_abEGIKLGILNO579;< ,/134LOQSTuwz}~469;<tvxz{ 68:<=acegh  479;<=ak02467NPSUVgilop]_bef   3demnPQ]^g    {!|!δklεXѶ_w23 ()JKwT&|&&&&&&&&&&&&&&&&&&&&&&&&&&''''''''%''')'*'+'4'5'7'8'9'K'L'M'N'O'e'g'h'i'j'}''''''''''''''''''''''''-~..../P/Q//7080d000ddddddddeJeKeqeee 7 L g h B P Q B P Q a p q # 2 C H \ v T _ p x                            ! " # $ % & ' ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ? @ A B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \ ] ^ _                           ! " # $ % & ' ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < , M p      5 n o ~   n9 9 9 9 9 ; ; ; ; ; ; < < < < < < = = = > > > ? ? ? @ @ @ kA lA mA B B B B C C C D D D E E E F F F G G G H H H jI kI lI iJ jJ kJ UK VK WK 4L 5L 6L M M M 4N 5N 9N PN hN wN xN {N O O O O O P P P P P ^Q _Q `Q aQ cQ _R `R aR bR dR R R R R R SS TS US VS XS RT ST TT UT WT T T T T T T T T T T U U U U U #V $V %V &V )V ~V V V V V KX LX MX NX QX X X X X X Y Y Y Y Y Z Z Z Z Z Z Z Z Z Z \ \ \ \ \ ] ] ] ] ] ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ m_ n_ o_ p_ s_ -` .` /` 0` ` ` ` ` ` ` a a a a Ib Jb Kb Lb c c c c c c c c c c c c ^d _d `d ad d d d d e e e e f f f f 1g 2g 3g 4g g g g g Qh Rh Sh Th h h h h :i ;i ? 
o p q r     j k l m : ; < = c d e f                 i j k l     N O P Q 2 3 4 5 } ~               R S T U         i j k l ` a b c                                 C D E F                   L M N O ! ! ! ! ! ! ! ! ! ! V" W" X" Y" " " " " f# g# h# i# # # # # $ $ $ $ N$ O$ X$ m$ n$ $ $ $ $ $ $ $ % % % % K% L% M% N% ~% % % % % % % % % % % % & & & & L& M& N& O& & & & & & & & & & & & & #' $' -' B' C' g' ' ' ' ' ' ' ' ' ' ' O( P( Q( R( q( r( s( t( ( ( ( ( ( ( ( ( ( ( ( ( () )) *) +) Q) R) S) T) ) ) ) ) ) ) ) ) * * * * :* ;* <* =* e* f* o* * * * * * * * * * + + + + 5+ 6+ 7+ 8+ [+ \+ ]+ ^+ + + + + + + + + + + + + , , , , (, ), *, +, S, T, U, V, , , , , , , , , , , , , 8- 9- :- ;- - - - - - - - - - - . . . D. ]. ^. h. q. . . . . . . . . . . . . . . / / / / @/ A/ B/ C/ i/ j/ k/ l/ / / / / / / / / 0 0 0 0 :0 ;0 <0 =0 `0 a0 b0 c0 0 0 0 0 0 0 0 0 0 0 0 0 +1 ,1 -1 .1 W1 X1 a1 v1 w1 1 1 1 1 1 1 1 2 2 2 2 ,2 -2 .2 /2 [2 \2 ]2 ^2 2 2 2 2 2 2 2 2 2 2 2 2 3 3 3 3 73 83 93 :3 ^3 _3 `3 a3 3 3 3 3 3 3 3 3 3 m4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 :5 ;5 <5 =5 [5 \5 ]5 ^5 |5 }5 ~5 5 5 5 5 5 5 5 5 5 5 5 5 5 6 6 6 6 76 86 96 :6 [6 \6 ]6 ^6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 7 7 7 7 ;7 <7 =7 >7 a7 b7 c7 d7 7 7 7 7 7 7 7 7 7 7 8 8 8 8 A8 B8 `8 a8 b8 c8 p8 q8 r8 s8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 9 9 9 9 /9 09 19 29 J9 K9 L9 M9 b9 c9 d9 e9 y9 z9 {9 |9 }9 ~9 : : #; 0; N ;g =! "y%C ]!r=I%y 5m3w=?[ ;aY&[.cd  = +  + y9 N ` 1 _ -$ ' C* [- 51 3 7 @$"(,-,.N P@P6Pp@UnknownKalynnda BerenssaicGz Times New Roman5Symbol3& z ArialA& Trebuchet MSA& Arial NarrowSMonotype SortsSymbol;Wingdings9 Webdings7&  Verdana3z TimesG5  hMS Mincho-3 fgI&  Haettenschweiler5& zaTahoma;" Helvetica?5 z Courier New"1hpFpF;Su X ?u X ?YKf4d?3 ?3 `3H?m`fSNASA Software Safety GuidebookNASA-GB-1740.13NASA Glenn Research CenterFamilie Sleeckx                           ! " # $ % & ' ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ? 
@ A B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \ ] ^ _ ` a b c d e f g h i j k l m n o p q r s t u v w x y z { | } ~