package org.jenkinsci.plugins.workflow.graph;

/*
 * The MIT License
 *
 * Copyright (c) 2016, CloudBees, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

import com.google.common.base.Predicate;
import hudson.model.Action;
import org.jenkinsci.plugins.workflow.actions.ErrorAction;
import org.jenkinsci.plugins.workflow.actions.LabelAction;
import org.jenkinsci.plugins.workflow.actions.LogAction;
import org.jenkinsci.plugins.workflow.actions.StageAction;
import org.jenkinsci.plugins.workflow.actions.WorkspaceAction;

import javax.annotation.CheckForNull;
import javax.annotation.Nonnull;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/**
 * Generified algorithms for scanning flow graphs for information.
 * Supports a variety of search orders with pluggable match conditions.
 * Worth noting: predicates may be stateful here, and scanners are not thread-safe.
 *
 * @author Sam Van Oort
 */
public class FlowScanner {

    /**
     * Different ways of scanning the flow graph starting from one or more head nodes:
     * <ul>
     *   <li>DEPTH_FIRST_ALL_PARENTS — same as FlowWalker: scan through first parents
     *       (depth-first), then come back to visit parallel branches</li>
     *   <li>BLOCK_SCOPES — skims through the blocks from the inside out, in reverse order</li>
     *   <li>SINGLE_PARENT — only walks the hierarchy of the first parent of the head(s)</li>
     * </ul>
     */
    public enum ScanType {
        DEPTH_FIRST_ALL_PARENTS,
        BLOCK_SCOPES,
        SINGLE_PARENT
    }

    /**
     * Create a predicate that matches any FlowNode carrying a specific action.
     *
     * @param actionClass Action class to look for
     * @param <T> Action type
     * @return Predicate that matches when the FlowNode has the given action
     */
    public static <T extends Action> Predicate<FlowNode> createPredicateWhereActionExists(@Nonnull final Class<T> actionClass) {
        return new Predicate<FlowNode>() {
            @Override
            public boolean apply(FlowNode input) {
                return input != null && input.getAction(actionClass) != null;
            }
        };
    }

    // Default predicates for commonly-interesting node properties
    static final Predicate<FlowNode> MATCH_HAS_LABEL = createPredicateWhereActionExists(LabelAction.class);
    static final Predicate<FlowNode> MATCH_IS_STAGE = createPredicateWhereActionExists(StageAction.class);
    static final Predicate<FlowNode> MATCH_HAS_WORKSPACE = createPredicateWhereActionExists(WorkspaceAction.class);
    static final Predicate<FlowNode> MATCH_HAS_ERROR = createPredicateWhereActionExists(ErrorAction.class);
    static final Predicate<FlowNode> MATCH_HAS_LOG = createPredicateWhereActionExists(LogAction.class);

    /**
     * Convert stop nodes to a collection that can be efficiently membership-checked,
     * handling null/empty input. Small collections (&lt;= 10) are left as-is because a
     * linear contains() is cheap; larger non-Set collections are hashed.
     * Extracted here because the same conversion was previously copy-pasted into
     * every scan method.
     */
    @Nonnull
    static Collection<FlowNode> convertToFastCheckable(@CheckForNull Collection<FlowNode> stopNodes) {
        if (stopNodes == null || stopNodes.isEmpty()) {
            return Collections.emptySet();
        }
        if (stopNodes instanceof Set || stopNodes.size() <= 10) {
            return stopNodes;
        }
        return new HashSet<FlowNode>(stopNodes);
    }

    /** One of many ways to scan the flow graph. */
    public interface ScanAlgorithm {

        /**
         * Search for the first node (walking from the heads through parents) matching the condition.
         *
         * @param heads Nodes to start searching from
         * @param stopNodes Search doesn't go beyond any of these nodes; null or empty runs to start of flow
         * @param matchPredicate Matching condition for search
         * @return First node matching condition, or null if none found
         */
        @CheckForNull
        public FlowNode findFirstMatch(@CheckForNull Collection<FlowNode> heads,
                                       @CheckForNull Collection<FlowNode> stopNodes,
                                       @Nonnull Predicate<FlowNode> matchPredicate);

        /**
         * Search for all nodes (walking from the heads through parents) matching the condition.
         *
         * @param heads Nodes to start searching from
         * @param stopNodes Search doesn't go beyond any of these nodes; null or empty runs to start of flow
         * @param matchPredicate Matching condition for search
         * @return All nodes matching condition (never null; empty when no heads or no matches)
         */
        @Nonnull
        public Collection<FlowNode> findAllMatches(@CheckForNull Collection<FlowNode> heads,
                                                   @CheckForNull Collection<FlowNode> stopNodes,
                                                   @Nonnull Predicate<FlowNode> matchPredicate);
    }

    /** Does a simple and efficient depth-first search, covering parallel branches. */
    public static class DepthFirstScanner implements ScanAlgorithm {

        @Override
        @CheckForNull
        public FlowNode findFirstMatch(@CheckForNull Collection<FlowNode> heads,
                                       @CheckForNull Collection<FlowNode> stopNodes,
                                       @Nonnull Predicate<FlowNode> matchPredicate) {
            if (heads == null || heads.isEmpty()) {
                return null;
            }

            HashSet<FlowNode> visited = new HashSet<FlowNode>();
            ArrayDeque<FlowNode> queue = new ArrayDeque<FlowNode>(heads); // Only needed for parallel branches
            Collection<FlowNode> fastStopNodes = convertToFastCheckable(stopNodes);

            while (!queue.isEmpty()) {
                FlowNode f = queue.pop();
                // A node reachable through two children may be queued twice before
                // it is first visited; skip re-visits here.
                if (!visited.add(f)) {
                    continue;
                }
                if (matchPredicate.apply(f)) {
                    return f;
                }
                for (FlowNode p : f.getParents()) { // getParents() never returns null
                    if (!visited.contains(p) && !fastStopNodes.contains(p)) {
                        queue.push(p);
                    }
                }
            }
            return null;
        }

        @Override
        @Nonnull
        public Collection<FlowNode> findAllMatches(@CheckForNull Collection<FlowNode> heads,
                                                   @CheckForNull Collection<FlowNode> stopNodes,
                                                   @Nonnull Predicate<FlowNode> matchPredicate) {
            if (heads == null || heads.isEmpty()) {
                // Contract is @Nonnull: never hand back null (previously returned null here)
                return Collections.emptyList();
            }

            HashSet<FlowNode> visited = new HashSet<FlowNode>();
            ArrayDeque<FlowNode> queue = new ArrayDeque<FlowNode>(heads);
            ArrayList<FlowNode> matches = new ArrayList<FlowNode>();
            Collection<FlowNode> fastStopNodes = convertToFastCheckable(stopNodes);

            while (!queue.isEmpty()) {
                FlowNode f = queue.pop();
                // De-dupe at pop time: without this, a node queued via two children
                // would be matched (and reported) twice.
                if (!visited.add(f)) {
                    continue;
                }
                if (matchPredicate.apply(f)) {
                    matches.add(f);
                }
                for (FlowNode p : f.getParents()) { // getParents() never returns null
                    if (!visited.contains(p) && !fastStopNodes.contains(p)) {
                        queue.push(p);
                    }
                }
            }
            return matches;
        }
    }

    /**
     * Scans through a single ancestry chain (always follows the first eligible parent);
     * does not cover parallel branches.
     */
    public static class LinearScanner implements ScanAlgorithm {

        @Override
        @CheckForNull
        public FlowNode findFirstMatch(@CheckForNull Collection<FlowNode> heads,
                                       @CheckForNull Collection<FlowNode> stopNodes,
                                       @Nonnull Predicate<FlowNode> matchPredicate) {
            if (heads == null || heads.isEmpty()) {
                return null;
            }
            Collection<FlowNode> fastStopNodes = convertToFastCheckable(stopNodes);

            FlowNode current = heads.iterator().next();
            while (current != null) {
                if (matchPredicate.apply(current)) {
                    return current;
                }
                current = firstEligibleParent(current, fastStopNodes);
            }
            return null;
        }

        @Override
        @Nonnull
        public Collection<FlowNode> findAllMatches(@CheckForNull Collection<FlowNode> heads,
                                                   @CheckForNull Collection<FlowNode> stopNodes,
                                                   @Nonnull Predicate<FlowNode> matchPredicate) {
            if (heads == null || heads.isEmpty()) {
                // Contract is @Nonnull: never hand back null (previously returned null here)
                return Collections.emptyList();
            }
            Collection<FlowNode> fastStopNodes = convertToFastCheckable(stopNodes);
            ArrayList<FlowNode> matches = new ArrayList<FlowNode>();

            FlowNode current = heads.iterator().next();
            while (current != null) {
                if (matchPredicate.apply(current)) {
                    matches.add(current);
                }
                current = firstEligibleParent(current, fastStopNodes);
            }
            return matches;
        }

        /** Advance to the first parent not in the stop set, or null to end the walk. */
        @CheckForNull
        private static FlowNode firstEligibleParent(@Nonnull FlowNode node, @Nonnull Collection<FlowNode> stopNodes) {
            for (FlowNode p : node.getParents()) { // getParents() never returns null
                if (!stopNodes.contains(p)) {
                    return p;
                }
            }
            return null;
        }
    }

    /**
     * Scanner that jumps over nested blocks: whenever it reaches a {@link BlockEndNode}
     * it hops straight to the matching {@link BlockStartNode}, so block interiors are
     * never tested against the predicate.
     */
    public static class BlockHoppingScanner implements ScanAlgorithm {

        @Override
        @CheckForNull
        public FlowNode findFirstMatch(@CheckForNull Collection<FlowNode> heads,
                                       @CheckForNull Collection<FlowNode> stopNodes,
                                       @Nonnull Predicate<FlowNode> matchPredicate) {
            if (heads == null || heads.isEmpty()) {
                return null;
            }
            Collection<FlowNode> fastStopNodes = convertToFastCheckable(stopNodes);

            FlowNode current = heads.iterator().next();
            while (current != null) {
                if (current instanceof BlockEndNode) {
                    // Hop the block. The previous code cast unconditionally in the
                    // else-branch, throwing ClassCastException for any ordinary
                    // non-matching node.
                    current = ((BlockEndNode) current).getStartNode();
                } else if (matchPredicate.apply(current)) {
                    return current;
                }
                current = firstEligibleParent(current, fastStopNodes);
            }
            return null;
        }

        @Override
        @Nonnull
        public Collection<FlowNode> findAllMatches(@CheckForNull Collection<FlowNode> heads,
                                                   @CheckForNull Collection<FlowNode> stopNodes,
                                                   @Nonnull Predicate<FlowNode> matchPredicate) {
            if (heads == null || heads.isEmpty()) {
                // Contract is @Nonnull: never hand back null (previously returned null here)
                return Collections.emptyList();
            }
            Collection<FlowNode> fastStopNodes = convertToFastCheckable(stopNodes);
            ArrayList<FlowNode> matches = new ArrayList<FlowNode>();

            FlowNode current = heads.iterator().next();
            while (current != null) {
                if (current instanceof BlockEndNode) {
                    // Hop the block (see findFirstMatch for the cast fix rationale)
                    current = ((BlockEndNode) current).getStartNode();
                } else if (matchPredicate.apply(current)) {
                    matches.add(current);
                }
                current = firstEligibleParent(current, fastStopNodes);
            }
            return matches;
        }

        /** Advance to the first parent not in the stop set, or null to end the walk. */
        @CheckForNull
        private static FlowNode firstEligibleParent(@Nonnull FlowNode node, @Nonnull Collection<FlowNode> stopNodes) {
            for (FlowNode p : node.getParents()) { // getParents() never returns null
                if (!stopNodes.contains(p)) {
                    return p;
                }
            }
            return null;
        }
    }
}
package org.jenkinsci.plugins.workflow.graph;

import com.google.common.base.Function;
import com.google.common.base.Predicate;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import org.jenkinsci.plugins.workflow.flow.FlowExecution;

import javax.annotation.CheckForNull;
import javax.annotation.Nonnull;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

/**
 * Provides incremental analysis of flow graphs, where updates arrive at the head nodes:
 * rescans only the region between the current heads and the previously seen heads,
 * caching the last computed value.
 *
 * @author Sam Van Oort
 */
public class IncrementalFlowAnalysis {

    protected static class IncrementalAnalysis<T> {
        /** String ids of the head nodes seen on the last update; used to detect graph change. */
        protected List<String> lastHeadIds = new ArrayList<String>();
        /** Value computed on the last scan. */
        protected T lastValue;

        /** Extracts the analysis value from the matched FlowNode (invoked with null when no node matched). */
        protected Function<FlowNode, T> valueExtractor;

        /** Condition selecting the node of interest during the scan. */
        protected Predicate<FlowNode> nodeMatchCondition;

        public IncrementalAnalysis(@Nonnull Predicate<FlowNode> nodeMatchCondition,
                                   @Nonnull Function<FlowNode, T> valueExtractFunction) {
            this.nodeMatchCondition = nodeMatchCondition;
            this.valueExtractor = valueExtractFunction;
        }

        /**
         * Look up a value scanned from the flow.
         * If the heads haven't changed since the last call, the cached value is returned;
         * otherwise the flow is rescanned from the new heads down to the previously seen
         * heads and the cache is refreshed.
         *
         * @param exec execution to analyze, may be null
         * @return the (possibly cached) analysis value, or null if exec is null or has no heads
         */
        @CheckForNull
        public T getUpdatedValue(@CheckForNull FlowExecution exec) {
            if (exec == null) {
                return null;
            }
            List<FlowNode> heads = exec.getCurrentHeads();
            if (heads == null || heads.isEmpty()) {
                return null;
            }
            // The cache is only valid when EVERY current head was also a head last time.
            // (The previous logic declared the cache valid if ANY single head id matched,
            // which kept stale values when only some parallel branches advanced, and it
            // returned null outright — never recomputing — when the head count changed.)
            boolean useCache = heads.size() == lastHeadIds.size();
            if (useCache) {
                for (FlowNode f : heads) {
                    if (!lastHeadIds.contains(f.getId())) {
                        useCache = false;
                        break;
                    }
                }
            }
            if (!useCache) {
                update(exec);
            }
            return lastValue;
        }

        /**
         * Rescan from the current heads, stopping at the previously seen heads, then
         * refresh the cached value and the remembered head ids.
         */
        protected void update(@Nonnull FlowExecution exec) {
            ArrayList<FlowNode> stopNodes = new ArrayList<FlowNode>();
            for (String nodeId : this.lastHeadIds) {
                try {
                    stopNodes.add(exec.getNode(nodeId));
                } catch (IOException ioe) {
                    // Node ids came from this execution; failure to load one is a corrupt flow
                    throw new IllegalStateException(ioe);
                }
            }
            FlowNode matchNode = new FlowScanner.BlockHoppingScanner()
                    .findFirstMatch(exec.getCurrentHeads(), stopNodes, this.nodeMatchCondition);
            // NOTE(review): matchNode may be null when nothing matched — the extractor is
            // expected to tolerate null input; confirm with implementations.
            this.lastValue = this.valueExtractor.apply(matchNode);

            this.lastHeadIds.clear();
            for (FlowNode f : exec.getCurrentHeads()) {
                lastHeadIds.add(f.getId());
            }
        }
    }

    // NOTE(review): key semantics are not shown here — presumably a job/run identifier;
    // confirm at call sites before relying on it.
    static Cache<String, IncrementalAnalysis<?>> analysisCache =
            CacheBuilder.newBuilder().initialCapacity(100).build();
}
+150,7 @@ public FlowNode findFirstMatch(@CheckForNull Collection heads, @CheckF @Override public Collection findAllMatches(@CheckForNull Collection heads, @CheckForNull Collection stopNodes, @Nonnull Predicate matchPredicate) { if (heads == null || heads.size() == 0) { - return null; + return Collections.EMPTY_LIST; } HashSet visited = new HashSet(); @@ -218,7 +218,7 @@ public FlowNode findFirstMatch(@CheckForNull Collection heads, @CheckF @Override public Collection findAllMatches(@CheckForNull Collection heads, @CheckForNull Collection stopNodes, @Nonnull Predicate matchPredicate) { if (heads == null || heads.size() == 0) { - return null; + return Collections.EMPTY_LIST; } // Do what we need to for fast tests @@ -285,7 +285,7 @@ public FlowNode findFirstMatch(@CheckForNull Collection heads, @CheckF @Override public Collection findAllMatches(@CheckForNull Collection heads, @CheckForNull Collection stopNodes, @Nonnull Predicate matchPredicate) { if (heads == null || heads.size() == 0) { - return null; + return Collections.EMPTY_LIST; } // Do what we need to for fast tests From 2b51497f8e5d5db546647267139824e7f651a942 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Mon, 18 Apr 2016 01:49:23 -0400 Subject: [PATCH 003/104] Tests for the unrefactored FlowScanner --- pom.xml | 20 +- .../plugins/workflow/graph/FlowScanner.java | 19 +- .../workflow/graph/TestFlowScanner.java | 184 ++++++++++++++++++ 3 files changed, 219 insertions(+), 4 deletions(-) create mode 100644 src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java diff --git a/pom.xml b/pom.xml index 204a85f0..5fa4333b 100644 --- a/pom.xml +++ b/pom.xml @@ -48,7 +48,7 @@ scm:git:git@github.com:jenkinsci/${project.artifactId}-plugin.git https://github.com/jenkinsci/${project.artifactId}-plugin HEAD - + repo.jenkins-ci.org @@ -70,5 +70,23 @@ workflow-step-api 1.15 + + ${project.groupId} + workflow-job + 1.15 + test + + + + ${project.groupId} + workflow-cps + 2.2-SNAPSHOT + + + ${project.groupId} 
+ workflow-basic-steps + 1.15 + test + diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java index 36418551..1f993a97 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java @@ -3,7 +3,7 @@ /* * The MIT License * - * Copyright (c) 2013-2014, CloudBees, Inc. + * Copyright (c) 2016, CloudBees, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -24,7 +24,6 @@ * THE SOFTWARE. */ -import com.google.common.base.Function; import com.google.common.base.Predicate; import hudson.model.Action; import org.jenkinsci.plugins.workflow.actions.ErrorAction; @@ -32,6 +31,7 @@ import org.jenkinsci.plugins.workflow.actions.LogAction; import org.jenkinsci.plugins.workflow.actions.StageAction; import org.jenkinsci.plugins.workflow.actions.WorkspaceAction; +import org.jenkinsci.plugins.workflow.cps.nodes.StepAtomNode; import javax.annotation.CheckForNull; import javax.annotation.Nonnull; @@ -40,7 +40,6 @@ import java.util.Collection; import java.util.Collections; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Set; @@ -87,6 +86,20 @@ public boolean apply(FlowNode input) { static final Predicate MATCH_HAS_ERROR = createPredicateWhereActionExists(ErrorAction.class); static final Predicate MATCH_HAS_LOG = createPredicateWhereActionExists(LogAction.class); + public static Predicate createPredicateForStepNodeWithDescriptor(final String descriptorId) { + Predicate outputPredicate = new Predicate() { + @Override + public boolean apply(FlowNode input) { + if (input instanceof StepAtomNode) { + StepAtomNode san = (StepAtomNode)input; + return descriptorId.equals(san.getDescriptor().getId()); + } + return false; + } + }; + return 
outputPredicate; + } + /** One of many ways to scan the flowgraph */ public interface ScanAlgorithm { diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java new file mode 100644 index 00000000..9738372e --- /dev/null +++ b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java @@ -0,0 +1,184 @@ +/* + * The MIT License + * + * Copyright (c) 2016, CloudBees, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +package org.jenkinsci.plugins.workflow.graph; + +import com.google.common.base.Predicate; +import com.google.common.base.Predicates; +import org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition; +import org.jenkinsci.plugins.workflow.flow.FlowExecution; +import org.jenkinsci.plugins.workflow.job.WorkflowJob; +import org.jenkinsci.plugins.workflow.job.WorkflowRun; +import org.junit.Assert; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.jvnet.hudson.test.BuildWatcher; +import org.jvnet.hudson.test.JenkinsRule; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +public class TestFlowScanner { + + @ClassRule + public static BuildWatcher buildWatcher = new BuildWatcher(); + + @Rule public JenkinsRule r = new JenkinsRule(); + + + /** Tests the basic scan algorithm, predicate use, start/stop nodes */ + @Test + public void testSimpleScan() throws Exception { + WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "Convoluted"); + job.setDefinition(new CpsFlowDefinition( + "sleep 2 \n" + + "echo 'donothing'\n" + + "echo 'doitagain'" + )); + WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); + FlowExecution exec = b.getExecution(); + FlowScanner.ScanAlgorithm[] scans = {new FlowScanner.LinearScanner(), + new FlowScanner.DepthFirstScanner(), + new FlowScanner.BlockHoppingScanner()}; + + Predicate echoPredicate = FlowScanner.createPredicateForStepNodeWithDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); + List heads = b.getExecution().getCurrentHeads(); + + // Test expected scans with no stop nodes given (different ways of specifying none) + for (FlowScanner.ScanAlgorithm sa : scans) { + FlowNode node = sa.findFirstMatch(heads, null, echoPredicate); + Assert.assertEquals(exec.getNode("5"), node); + node = sa.findFirstMatch(heads, Collections.EMPTY_LIST, echoPredicate); + Assert.assertEquals(exec.getNode("5"), node); + node = 
sa.findFirstMatch(heads, Collections.EMPTY_SET, echoPredicate); + Assert.assertEquals(exec.getNode("5"), node); + + Collection nodeList = sa.findAllMatches(heads, null, echoPredicate); + FlowNode[] expected = new FlowNode[]{exec.getNode("5"), exec.getNode("4")}; + Assert.assertArrayEquals(expected, nodeList.toArray()); + nodeList = sa.findAllMatches(heads, Collections.EMPTY_LIST, echoPredicate); + Assert.assertArrayEquals(expected, nodeList.toArray()); + nodeList = sa.findAllMatches(heads, Collections.EMPTY_SET, echoPredicate); + Assert.assertArrayEquals(expected, nodeList.toArray()); + } + + // Test with no matches + for (FlowScanner.ScanAlgorithm sa : scans) { + FlowNode node = sa.findFirstMatch(heads, null, (Predicate)Predicates.alwaysFalse()); + Assert.assertNull(node); + + Collection nodeList = sa.findAllMatches(heads, null, (Predicate)Predicates.alwaysFalse()); + Assert.assertNotNull(nodeList); + Assert.assertEquals(0, nodeList.size()); + } + + // Test with a stop node given, sometimes no matches + Collection noMatchEndNode = Collections.singleton(exec.getNode("5")); + Collection singleMatchEndNode = Collections.singleton(exec.getNode("4")); + for (FlowScanner.ScanAlgorithm sa : scans) { + FlowNode node = sa.findFirstMatch(heads, noMatchEndNode, echoPredicate); + Assert.assertNull(node); + + Collection nodeList = sa.findAllMatches(heads, noMatchEndNode, echoPredicate); + Assert.assertNotNull(nodeList); + Assert.assertEquals(0, nodeList.size()); + + // Now we try with a stop list the reduces node set for multiple matches + node = sa.findFirstMatch(heads, singleMatchEndNode, echoPredicate); + Assert.assertEquals(exec.getNode("5"), node); + nodeList = sa.findAllMatches(heads, singleMatchEndNode, echoPredicate); + Assert.assertNotNull(nodeList); + Assert.assertEquals(1, nodeList.size()); + Assert.assertEquals(exec.getNode("5"), nodeList.iterator().next()); + } + } + + /** Tests the basic scan algorithm where blocks are involved */ + @Test + public void 
blockScan() throws Exception { + WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "Convoluted"); + job.setDefinition(new CpsFlowDefinition( + "echo 'first'\n" + + "timeout(time: 10, unit: 'SECONDS') {\n" + + " echo 'second'\n" + + " echo 'third'\n" + + "}\n" + + "sleep 1" + )); + WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); + FlowScanner.ScanAlgorithm[] scans = {new FlowScanner.LinearScanner(), + new FlowScanner.DepthFirstScanner(), + new FlowScanner.BlockHoppingScanner()}; + for (FlowScanner.ScanAlgorithm sa : scans) { + + } + FlowGraphWalker walk = new FlowGraphWalker(b.getExecution()); + ArrayList flows = new ArrayList(); + for (FlowNode f : walk) { + flows.add(f); + } + + } + + + @Test + public void testme() throws Exception { + WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "Convoluted"); + job.setDefinition(new CpsFlowDefinition( + "echo 'pre-stage command'\n" + + "sleep 1\n" + + "stage 'first'\n" + + "echo 'I ran'\n" + + "stage 'second'\n" + + "node {\n" + + " def steps = [:]\n" + + " steps['2a-dir'] = {\n" + + " echo 'do 2a stuff'\n" + + " echo 'do more 2a stuff'\n" + + " timeout(time: 10, unit: 'SECONDS') {\n" + + " stage 'invalid'\n" + + " echo 'time seconds'\n" + + " }\n" + + " sleep 15\n" + + " }\n" + + " steps['2b'] = {\n" + + " echo 'do 2b stuff'\n" + + " sleep 10\n" + + " echo 'echo_label_me'\n" + + " }\n" + + " parallel steps\n" + + "}\n" + + "\n" + + "stage 'final'\n" + + "echo 'ran final 1'\n" + + "echo 'ran final 2'" + )); + WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); + FlowGraphWalker walker = new FlowGraphWalker(); + walker.addHeads(b.getExecution().getCurrentHeads()); + } +} \ No newline at end of file From bb0864e9cef64fd5dd92c34abc14af5de70d2f3d Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Mon, 18 Apr 2016 19:15:20 -0400 Subject: [PATCH 004/104] WIP on pulling out generalized flowscanning algorithms --- .../plugins/workflow/graph/FlowScanner.java | 189 
+++++++++++++++--- .../workflow/graph/TestFlowScanner.java | 2 +- 2 files changed, 157 insertions(+), 34 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java index 1f993a97..77637454 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java @@ -40,6 +40,7 @@ import java.util.Collection; import java.util.Collections; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Set; @@ -100,7 +101,8 @@ public boolean apply(FlowNode input) { return outputPredicate; } - /** One of many ways to scan the flowgraph */ + /** Interface to be used for scanning/analyzing FlowGraphs with support for different visit orders + */ public interface ScanAlgorithm { /** @@ -124,23 +126,142 @@ public interface ScanAlgorithm { public Collection findAllMatches(@CheckForNull Collection heads, @CheckForNull Collection stopNodes, @Nonnull Predicate matchPredicate); } + /** + * Base class for flow scanners, which offers basic methods and stubs for algorithms + * Scanners store state internally, and are not thread-safe but are reusable + * Scans/analysis of graphs is implemented via internal iteration to allow reusing algorithm bodies + * However internal iteration has access to additional information + */ + public static abstract class AbstractFlowScanner implements ScanAlgorithm { + + // State variables, not all need be used + protected ArrayDeque _queue; + protected FlowNode _current; + + // Public APIs need to invoke this before searches + protected abstract void initialize(); + + /** + * Actual meat of the iteration, get the next node to visit, using & updating state as needed + * @param f Node to look for parents of (usually _current) + * @param blackList Nodes that are not eligible for visiting + * @return Next node to visit, or null if we've 
exhausted the node list + */ + @CheckForNull + protected abstract FlowNode next(@CheckForNull FlowNode f, @Nonnull Collection blackList); + + + /** Fast internal scan from start through single-parent (unbranched) nodes until we hit a node with one of the following: + * - Multiple parents + * - No parents + * - Satisfies the endCondition predicate + * + * @param endCondition Predicate that ends search + * @return Node satisfying condition + */ + @CheckForNull + protected static FlowNode linearScanUntil(@Nonnull FlowNode start, @Nonnull Predicate endCondition) { + while(true) { + if (endCondition.apply(start)){ + break; + } + List parents = start.getParents(); + if (parents == null || parents.size() == 0 || parents.size() > 1) { + break; + } + start = parents.get(0); + } + return start; + } + + /** Convert stop nodes to a collection that can efficiently be checked for membership, handling nulls if needed */ + @Nonnull + protected Collection convertToFastCheckable(@CheckForNull Collection nodeCollection) { + if (nodeCollection == null || nodeCollection.size()==0) { + return Collections.EMPTY_SET; + } else if (nodeCollection instanceof Set) { + return nodeCollection; + } + return nodeCollection.size() > 5 ? 
new HashSet(nodeCollection) : nodeCollection; + } + + public FlowNode findFirstMatch(@CheckForNull Collection heads, @Nonnull Predicate matchPredicate) { + return this.findFirstMatch(heads, null, matchPredicate); + } + + public Collection findAllMatches(@CheckForNull Collection heads, @Nonnull Predicate matchPredicate) { + return this.findAllMatches(heads, null, matchPredicate); + } + + // Basic algo impl + protected FlowNode findFirstMatchBasic(@CheckForNull Collection heads, + @CheckForNull Collection endNodes, + Predicate matchCondition) { + if (heads == null || heads.size() == 0) { + return null; + } + initialize(); + Collection fastEndNodes = convertToFastCheckable(endNodes); + + while((this._current = this.next(_current, fastEndNodes)) != null) { + if (matchCondition.apply(this._current)) { + return this._current; + } + } + return null; + } + + // Basic algo impl + protected List findAllMatchesBasic(@CheckForNull Collection heads, + @CheckForNull Collection endNodes, + Predicate matchCondition) { + if (heads == null || heads.size() == 0) { + return null; + } + initialize(); + Collection fastEndNodes = convertToFastCheckable(endNodes); + ArrayList nodes = new ArrayList(); + + while((this._current = this.next(_current, fastEndNodes)) != null) { + if (matchCondition.apply(this._current)) { + nodes.add(this._current); + } + } + return nodes; + } + } + /** Does a simple and efficient depth-first search */ - public static class DepthFirstScanner implements ScanAlgorithm { + public static class DepthFirstScanner extends AbstractFlowScanner { + + protected HashSet _visited = new HashSet(); + + protected void initialize() { + if (this._queue == null) { + this._queue = new ArrayDeque(); + } else { + this._queue.clear(); + } + this._visited.clear(); + this._current = null; + } + + @Override + protected FlowNode next(@CheckForNull FlowNode f, @Nonnull Collection blackList) { + // Check for visited and stuff? 
+ return null; + } @Override public FlowNode findFirstMatch(@CheckForNull Collection heads, @CheckForNull Collection stopNodes, @Nonnull Predicate matchPredicate) { if (heads == null || heads.size() == 0) { return null; } + initialize(); HashSet visited = new HashSet(); ArrayDeque queue = new ArrayDeque(heads); // Only used for parallel branches - - // Do what we need to for fast tests - Collection fastStopNodes = (stopNodes == null || stopNodes.size() == 0) ? Collections.EMPTY_SET : stopNodes; - if (fastStopNodes.size() > 10 && !(fastStopNodes instanceof Set)) { - fastStopNodes = new HashSet(fastStopNodes); - } + Collection fastStopNodes = convertToFastCheckable(stopNodes); // TODO this will probably be more efficient if we work with the first node // or use a recursive solution for parallel forks @@ -169,12 +290,7 @@ public Collection findAllMatches(@CheckForNull Collection he HashSet visited = new HashSet(); ArrayDeque queue = new ArrayDeque(heads); // Only used for parallel branches ArrayList matches = new ArrayList(); - - // Do what we need to for fast tests - Collection fastStopNodes = (stopNodes == null || stopNodes.size() == 0) ? 
Collections.EMPTY_SET : stopNodes; - if (fastStopNodes.size() > 10 && !(fastStopNodes instanceof Set)) { - fastStopNodes = new HashSet(fastStopNodes); - } + Collection fastStopNodes = convertToFastCheckable(stopNodes); // TODO this will probably be more efficient if use a variable for non-parallel flows and don't constantly push/pop array while (!queue.isEmpty()) { @@ -197,7 +313,7 @@ public Collection findAllMatches(@CheckForNull Collection he /** * Scans through a single ancestry, does not cover parallel branches */ - public static class LinearScanner implements ScanAlgorithm { + public static class LinearScanner extends AbstractFlowScanner { @Override public FlowNode findFirstMatch(@CheckForNull Collection heads, @CheckForNull Collection stopNodes, @Nonnull Predicate matchPredicate) { @@ -205,11 +321,7 @@ public FlowNode findFirstMatch(@CheckForNull Collection heads, @CheckF return null; } - // Do what we need to for fast tests - Collection fastStopNodes = (stopNodes == null || stopNodes.size() == 0) ? Collections.EMPTY_SET : stopNodes; - if (fastStopNodes.size() > 10 && !(fastStopNodes instanceof Set)) { - fastStopNodes = new HashSet(fastStopNodes); - } + Collection fastStopNodes = convertToFastCheckable(stopNodes); FlowNode current = heads.iterator().next(); while (current != null) { @@ -235,10 +347,7 @@ public Collection findAllMatches(@CheckForNull Collection he } // Do what we need to for fast tests - Collection fastStopNodes = (stopNodes == null || stopNodes.size() == 0) ? 
Collections.EMPTY_SET : stopNodes; - if (fastStopNodes.size() > 10 && !(fastStopNodes instanceof Set)) { - fastStopNodes = new HashSet(fastStopNodes); - } + Collection fastStopNodes = convertToFastCheckable(stopNodes); ArrayList matches = new ArrayList(); FlowNode current = heads.iterator().next(); @@ -257,12 +366,22 @@ public Collection findAllMatches(@CheckForNull Collection he } return matches; } + + @Override + protected void initialize() { + // no-op for us + } + + @Override + protected FlowNode next(@CheckForNull FlowNode f, @Nonnull Collection blackList) { + return null; + } } /** * Scanner that jumps over nested blocks */ - public static class BlockHoppingScanner implements ScanAlgorithm { + public static class BlockHoppingScanner extends AbstractFlowScanner { @Override public FlowNode findFirstMatch(@CheckForNull Collection heads, @CheckForNull Collection stopNodes, @Nonnull Predicate matchPredicate) { @@ -271,10 +390,7 @@ public FlowNode findFirstMatch(@CheckForNull Collection heads, @CheckF } // Do what we need to for fast tests - Collection fastStopNodes = (stopNodes == null || stopNodes.size() == 0) ? Collections.EMPTY_SET : stopNodes; - if (fastStopNodes.size() > 10 && !(fastStopNodes instanceof Set)) { - fastStopNodes = new HashSet(fastStopNodes); - } + Collection fastStopNodes = convertToFastCheckable(stopNodes); FlowNode current = heads.iterator().next(); while (current != null) { @@ -302,10 +418,7 @@ public Collection findAllMatches(@CheckForNull Collection he } // Do what we need to for fast tests - Collection fastStopNodes = (stopNodes == null || stopNodes.size() == 0) ? 
Collections.EMPTY_SET : stopNodes; - if (fastStopNodes.size() > 10 && !(fastStopNodes instanceof Set)) { - fastStopNodes = new HashSet(fastStopNodes); - } + Collection fastStopNodes = convertToFastCheckable(stopNodes); ArrayList matches = new ArrayList(); FlowNode current = heads.iterator().next(); @@ -326,5 +439,15 @@ public Collection findAllMatches(@CheckForNull Collection he } return matches; } + + @Override + protected void initialize() { + + } + + @Override + protected FlowNode next(@CheckForNull FlowNode f, @Nonnull Collection blackList) { + return null; + } } } diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java index 9738372e..9028396b 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java @@ -66,7 +66,7 @@ public void testSimpleScan() throws Exception { new FlowScanner.BlockHoppingScanner()}; Predicate echoPredicate = FlowScanner.createPredicateForStepNodeWithDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); - List heads = b.getExecution().getCurrentHeads(); + List heads = exec.getCurrentHeads(); // Test expected scans with no stop nodes given (different ways of specifying none) for (FlowScanner.ScanAlgorithm sa : scans) { From f4de17485cd42042caafaa3449645c3e7f3653ea Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 19 Apr 2016 02:08:31 -0400 Subject: [PATCH 005/104] Flesh out/fix most of the flow scanner implementations --- .../plugins/workflow/graph/FlowScanner.java | 223 ++++++------------ .../workflow/graph/TestFlowScanner.java | 25 +- 2 files changed, 83 insertions(+), 165 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java index 77637454..b77ae46d 100644 --- 
a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java @@ -53,17 +53,6 @@ * @author Sam Van Oort */ public class FlowScanner { - /** Different ways of scannning the flow graph starting from one or more head nodes - * DEPTH_FIRST_ALL_PARENTS is the same as FlowWalker - * - scan through the first parents (depth first search), then come back to visit parallel branches - * BLOCK_SCOPES just skims through the blocks from the inside out, in reverse order - * SINGLE_PARENT only walks through the hierarchy of the first parent in the head (or heads) - */ - public enum ScanType { - DEPTH_FIRST_ALL_PARENTS, - BLOCK_SCOPES, - SINGLE_PARENT - } /** * Create a predicate that will match on all FlowNodes having a specific action present @@ -71,6 +60,7 @@ public enum ScanType { * @param Action type * @return Predicate that will match when FlowNode has the action given */ + @Nonnull public static Predicate createPredicateWhereActionExists(@Nonnull final Class actionClass) { return new Predicate() { @Override @@ -87,7 +77,7 @@ public boolean apply(FlowNode input) { static final Predicate MATCH_HAS_ERROR = createPredicateWhereActionExists(ErrorAction.class); static final Predicate MATCH_HAS_LOG = createPredicateWhereActionExists(LogAction.class); - public static Predicate createPredicateForStepNodeWithDescriptor(final String descriptorId) { + public static Predicate predicateMatchStepDescriptor(final String descriptorId) { Predicate outputPredicate = new Predicate() { @Override public boolean apply(FlowNode input) { @@ -141,14 +131,15 @@ public static abstract class AbstractFlowScanner implements ScanAlgorithm { // Public APIs need to invoke this before searches protected abstract void initialize(); + protected abstract void setHeads(@Nonnull Collection heads); + /** * Actual meat of the iteration, get the next node to visit, using & updating state as needed - * @param f Node to look for parents 
of (usually _current) * @param blackList Nodes that are not eligible for visiting * @return Next node to visit, or null if we've exhausted the node list */ @CheckForNull - protected abstract FlowNode next(@CheckForNull FlowNode f, @Nonnull Collection blackList); + protected abstract FlowNode next(@Nonnull Collection blackList); /** Fast internal scan from start through single-parent (unbranched) nodes until we hit a node with one of the following: @@ -185,46 +176,50 @@ protected Collection convertToFastCheckable(@CheckForNull Collection 5 ? new HashSet(nodeCollection) : nodeCollection; } + @CheckForNull public FlowNode findFirstMatch(@CheckForNull Collection heads, @Nonnull Predicate matchPredicate) { return this.findFirstMatch(heads, null, matchPredicate); } + @Nonnull public Collection findAllMatches(@CheckForNull Collection heads, @Nonnull Predicate matchPredicate) { return this.findAllMatches(heads, null, matchPredicate); } // Basic algo impl - protected FlowNode findFirstMatchBasic(@CheckForNull Collection heads, + public FlowNode findFirstMatch(@CheckForNull Collection heads, @CheckForNull Collection endNodes, Predicate matchCondition) { if (heads == null || heads.size() == 0) { return null; } initialize(); + this.setHeads(heads); Collection fastEndNodes = convertToFastCheckable(endNodes); - while((this._current = this.next(_current, fastEndNodes)) != null) { - if (matchCondition.apply(this._current)) { - return this._current; + while ((_current = next(fastEndNodes)) != null) { + if (matchCondition.apply(_current)) { + return _current; } } return null; } // Basic algo impl - protected List findAllMatchesBasic(@CheckForNull Collection heads, + public List findAllMatches(@CheckForNull Collection heads, @CheckForNull Collection endNodes, Predicate matchCondition) { if (heads == null || heads.size() == 0) { return null; } initialize(); + this.setHeads(heads); Collection fastEndNodes = convertToFastCheckable(endNodes); ArrayList nodes = new ArrayList(); - 
while((this._current = this.next(_current, fastEndNodes)) != null) { - if (matchCondition.apply(this._current)) { - nodes.add(this._current); + while ((_current = next(fastEndNodes)) != null) { + if (matchCondition.apply(_current)) { + nodes.add(_current); } } return nodes; @@ -247,66 +242,34 @@ protected void initialize() { } @Override - protected FlowNode next(@CheckForNull FlowNode f, @Nonnull Collection blackList) { - // Check for visited and stuff? - return null; + protected void setHeads(@Nonnull Collection heads) { + // Needs to handle blacklist + _queue.addAll(heads); } @Override - public FlowNode findFirstMatch(@CheckForNull Collection heads, @CheckForNull Collection stopNodes, @Nonnull Predicate matchPredicate) { - if (heads == null || heads.size() == 0) { - return null; - } - initialize(); - - HashSet visited = new HashSet(); - ArrayDeque queue = new ArrayDeque(heads); // Only used for parallel branches - Collection fastStopNodes = convertToFastCheckable(stopNodes); - - // TODO this will probably be more efficient if we work with the first node - // or use a recursive solution for parallel forks - while (!queue.isEmpty()) { - FlowNode f = queue.pop(); - if (matchPredicate.apply(f)) { - return f; - } - visited.add(f); - List parents = f.getParents(); // Parents never null - for (FlowNode p : parents) { - if (!visited.contains(p) && !fastStopNodes.contains(p)) { - queue.push(p); + protected FlowNode next(@Nonnull Collection blackList) { + FlowNode output = null; + if (_current != null) { + List parents = _current.getParents(); + if (parents != null) { + for (FlowNode f : parents) { + if (!blackList.contains(f) && !_visited.contains(f)) { + if (output != null ) { + output = f; + } else { + _queue.push(f); + } + } } } } - return null; - } - @Override - public Collection findAllMatches(@CheckForNull Collection heads, @CheckForNull Collection stopNodes, @Nonnull Predicate matchPredicate) { - if (heads == null || heads.size() == 0) { - return 
Collections.EMPTY_LIST; - } - - HashSet visited = new HashSet(); - ArrayDeque queue = new ArrayDeque(heads); // Only used for parallel branches - ArrayList matches = new ArrayList(); - Collection fastStopNodes = convertToFastCheckable(stopNodes); - - // TODO this will probably be more efficient if use a variable for non-parallel flows and don't constantly push/pop array - while (!queue.isEmpty()) { - FlowNode f = queue.pop(); - if (matchPredicate.apply(f)) { - matches.add(f); - } - visited.add(f); - List parents = f.getParents(); // Parents never null - for (FlowNode p : parents) { - if (!visited.contains(p) && !fastStopNodes.contains(p)) { - queue.push(p); - } - } + if (output == null && _queue.size() > 0) { + output = _queue.pop(); } - return matches; + _visited.add(output); + return output; } } @@ -316,64 +279,30 @@ public Collection findAllMatches(@CheckForNull Collection he public static class LinearScanner extends AbstractFlowScanner { @Override - public FlowNode findFirstMatch(@CheckForNull Collection heads, @CheckForNull Collection stopNodes, @Nonnull Predicate matchPredicate) { - if (heads == null || heads.size() == 0) { - return null; - } - - Collection fastStopNodes = convertToFastCheckable(stopNodes); + protected void initialize() { + // no-op for us + } - FlowNode current = heads.iterator().next(); - while (current != null) { - if (matchPredicate.apply(current)) { - return current; - } - List parents = current.getParents(); // Parents never null - current = null; - for (FlowNode p : parents) { - if (!fastStopNodes.contains(p)) { - current = p; - break; - } - } + @Override + protected void setHeads(@Nonnull Collection heads) { + if (heads.size() > 0) { + this._current = heads.iterator().next(); } - return current; } @Override - public Collection findAllMatches(@CheckForNull Collection heads, @CheckForNull Collection stopNodes, @Nonnull Predicate matchPredicate) { - if (heads == null || heads.size() == 0) { - return Collections.EMPTY_LIST; + protected 
FlowNode next(@Nonnull Collection blackList) { + if (_current == null) { + return null; } - - // Do what we need to for fast tests - Collection fastStopNodes = convertToFastCheckable(stopNodes); - ArrayList matches = new ArrayList(); - - FlowNode current = heads.iterator().next(); - while (current != null) { - if (matchPredicate.apply(current)) { - matches.add(current); - } - List parents = current.getParents(); // Parents never null - current = null; - for (FlowNode p : parents) { - if (!fastStopNodes.contains(p)) { - current = p; - break; + List parents = _current.getParents(); + if (parents != null || parents.size() > 0) { + for (FlowNode f : parents) { + if (!blackList.contains(f)) { + return f; } } } - return matches; - } - - @Override - protected void initialize() { - // no-op for us - } - - @Override - protected FlowNode next(@CheckForNull FlowNode f, @Nonnull Collection blackList) { return null; } } @@ -411,33 +340,9 @@ public FlowNode findFirstMatch(@CheckForNull Collection heads, @CheckF return current; } - @Override - public Collection findAllMatches(@CheckForNull Collection heads, @CheckForNull Collection stopNodes, @Nonnull Predicate matchPredicate) { - if (heads == null || heads.size() == 0) { - return Collections.EMPTY_LIST; - } - - // Do what we need to for fast tests - Collection fastStopNodes = convertToFastCheckable(stopNodes); - ArrayList matches = new ArrayList(); - - FlowNode current = heads.iterator().next(); - while (current != null) { - if (!(current instanceof BlockEndNode) && matchPredicate.apply(current)) { - matches.add(current); - } else { // Hop the block - current = ((BlockEndNode) current).getStartNode(); - } - List parents = current.getParents(); // Parents never null - current = null; - for (FlowNode p : parents) { - if (!fastStopNodes.contains(p)) { - current = p; - break; - } - } - } - return matches; + protected FlowNode jumpBlock(FlowNode current) { + return (current instanceof BlockEndNode) ? 
+ ((BlockEndNode)current).getStartNode() : current; } @Override @@ -446,7 +351,23 @@ protected void initialize() { } @Override - protected FlowNode next(@CheckForNull FlowNode f, @Nonnull Collection blackList) { + protected void setHeads(@Nonnull Collection heads) { + _queue.addAll(heads); + } + + @Override + protected FlowNode next(@Nonnull Collection blackList) { + if (_current == null) { + return null; + } + List parents = _current.getParents(); + if (parents != null || parents.size() > 0) { + for (FlowNode f : parents) { + if (!blackList.contains(f)) { + return f; + } + } + } return null; } } diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java index 9028396b..d502ae70 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java @@ -49,7 +49,6 @@ public class TestFlowScanner { @Rule public JenkinsRule r = new JenkinsRule(); - /** Tests the basic scan algorithm, predicate use, start/stop nodes */ @Test public void testSimpleScan() throws Exception { @@ -63,13 +62,15 @@ public void testSimpleScan() throws Exception { FlowExecution exec = b.getExecution(); FlowScanner.ScanAlgorithm[] scans = {new FlowScanner.LinearScanner(), new FlowScanner.DepthFirstScanner(), - new FlowScanner.BlockHoppingScanner()}; + new FlowScanner.BlockHoppingScanner() + }; - Predicate echoPredicate = FlowScanner.createPredicateForStepNodeWithDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); + Predicate echoPredicate = FlowScanner.predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); List heads = exec.getCurrentHeads(); // Test expected scans with no stop nodes given (different ways of specifying none) for (FlowScanner.ScanAlgorithm sa : scans) { + System.out.println("Testing class: "+sa.getClass()); FlowNode node = sa.findFirstMatch(heads, null, 
echoPredicate); Assert.assertEquals(exec.getNode("5"), node); node = sa.findFirstMatch(heads, Collections.EMPTY_LIST, echoPredicate); @@ -119,7 +120,7 @@ public void testSimpleScan() throws Exception { /** Tests the basic scan algorithm where blocks are involved */ @Test - public void blockScan() throws Exception { + public void testBlockScan() throws Exception { WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "Convoluted"); job.setDefinition(new CpsFlowDefinition( "echo 'first'\n" + @@ -130,18 +131,14 @@ public void blockScan() throws Exception { "sleep 1" )); WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); - FlowScanner.ScanAlgorithm[] scans = {new FlowScanner.LinearScanner(), - new FlowScanner.DepthFirstScanner(), - new FlowScanner.BlockHoppingScanner()}; - for (FlowScanner.ScanAlgorithm sa : scans) { - } - FlowGraphWalker walk = new FlowGraphWalker(b.getExecution()); - ArrayList flows = new ArrayList(); - for (FlowNode f : walk) { - flows.add(f); - } + // Test blockhopping + FlowScanner.BlockHoppingScanner blockHoppingScanner = new FlowScanner.BlockHoppingScanner(); + Collection matches = blockHoppingScanner.findAllMatches(b.getExecution().getCurrentHeads(), null, + FlowScanner.predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep")); + // This means we jumped the blocks + Assert.assertEquals(1, matches.size()); } From d41472305c2db9144592c8c01dc9abdf73a2dcf8 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 19 Apr 2016 05:13:30 -0400 Subject: [PATCH 006/104] Finish tests for FlowScanner algorithms and fix remaining edge cases --- .../plugins/workflow/graph/FlowScanner.java | 68 ++++++---------- .../workflow/graph/TestFlowScanner.java | 77 +++++++++++-------- 2 files changed, 69 insertions(+), 76 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java index b77ae46d..86f4468c 100644 --- 
a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java @@ -193,9 +193,12 @@ public FlowNode findFirstMatch(@CheckForNull Collection heads, if (heads == null || heads.size() == 0) { return null; } + initialize(); - this.setHeads(heads); Collection fastEndNodes = convertToFastCheckable(endNodes); + Collection filteredHeads = new HashSet(heads); + filteredHeads.removeAll(fastEndNodes); + this.setHeads(filteredHeads); while ((_current = next(fastEndNodes)) != null) { if (matchCondition.apply(_current)) { @@ -213,8 +216,10 @@ public List findAllMatches(@CheckForNull Collection heads, return null; } initialize(); - this.setHeads(heads); Collection fastEndNodes = convertToFastCheckable(endNodes); + Collection filteredHeads = new HashSet(heads); + filteredHeads.removeAll(fastEndNodes); + this.setHeads(filteredHeads); ArrayList nodes = new ArrayList(); while ((_current = next(fastEndNodes)) != null) { @@ -277,10 +282,11 @@ protected FlowNode next(@Nonnull Collection blackList) { * Scans through a single ancestry, does not cover parallel branches */ public static class LinearScanner extends AbstractFlowScanner { + protected boolean isFirst = true; @Override protected void initialize() { - // no-op for us + isFirst = true; } @Override @@ -295,6 +301,10 @@ protected FlowNode next(@Nonnull Collection blackList) { if (_current == null) { return null; } + if (isFirst) { // Kind of cheating, but works + isFirst = false; + return _current; + } List parents = _current.getParents(); if (parents != null || parents.size() > 0) { for (FlowNode f : parents) { @@ -310,61 +320,33 @@ protected FlowNode next(@Nonnull Collection blackList) { /** * Scanner that jumps over nested blocks */ - public static class BlockHoppingScanner extends AbstractFlowScanner { - - @Override - public FlowNode findFirstMatch(@CheckForNull Collection heads, @CheckForNull Collection stopNodes, @Nonnull Predicate matchPredicate) 
{ - if (heads == null || heads.size() == 0) { - return null; - } - - // Do what we need to for fast tests - Collection fastStopNodes = convertToFastCheckable(stopNodes); - - FlowNode current = heads.iterator().next(); - while (current != null) { - if (!(current instanceof BlockEndNode) && matchPredicate.apply(current)) { - return current; - } else { // Hop the block - current = ((BlockEndNode) current).getStartNode(); - } - List parents = current.getParents(); // Parents never null - current = null; - for (FlowNode p : parents) { - if (!fastStopNodes.contains(p)) { - current = p; - break; - } - } - } - return current; - } + public static class BlockHoppingScanner extends LinearScanner { protected FlowNode jumpBlock(FlowNode current) { return (current instanceof BlockEndNode) ? ((BlockEndNode)current).getStartNode() : current; } - @Override - protected void initialize() { - - } - - @Override - protected void setHeads(@Nonnull Collection heads) { - _queue.addAll(heads); - } - @Override protected FlowNode next(@Nonnull Collection blackList) { if (_current == null) { return null; } + if (isFirst) { // Hax, but solves the problem + isFirst = false; + return _current; + } List parents = _current.getParents(); if (parents != null || parents.size() > 0) { for (FlowNode f : parents) { if (!blackList.contains(f)) { - return f; + FlowNode jumped = jumpBlock(f); + if (jumped != f) { + _current = jumped; + return next(blackList); + } else { + return f; + } } } } diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java index d502ae70..094781ec 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java @@ -89,6 +89,7 @@ public void testSimpleScan() throws Exception { // Test with no matches for (FlowScanner.ScanAlgorithm sa : scans) { + System.out.println("Testing class: 
"+sa.getClass()); FlowNode node = sa.findFirstMatch(heads, null, (Predicate)Predicates.alwaysFalse()); Assert.assertNull(node); @@ -97,6 +98,13 @@ public void testSimpleScan() throws Exception { Assert.assertEquals(0, nodeList.size()); } + // Verify we touch head and foot nodes too + for (FlowScanner.ScanAlgorithm sa : scans) { + System.out.println("Testing class: "+sa.getClass()); + Collection nodeList = sa.findAllMatches(heads, null, (Predicate)Predicates.alwaysTrue()); + Assert.assertEquals(5, nodeList.size()); + } + // Test with a stop node given, sometimes no matches Collection noMatchEndNode = Collections.singleton(exec.getNode("5")); Collection singleMatchEndNode = Collections.singleton(exec.getNode("4")); @@ -131,51 +139,54 @@ public void testBlockScan() throws Exception { "sleep 1" )); WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); + Predicate matchEchoStep = FlowScanner.predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); // Test blockhopping FlowScanner.BlockHoppingScanner blockHoppingScanner = new FlowScanner.BlockHoppingScanner(); - Collection matches = blockHoppingScanner.findAllMatches(b.getExecution().getCurrentHeads(), null, - FlowScanner.predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep")); + Collection matches = blockHoppingScanner.findAllMatches(b.getExecution().getCurrentHeads(), null, matchEchoStep); // This means we jumped the blocks Assert.assertEquals(1, matches.size()); - } + FlowScanner.DepthFirstScanner depthFirstScanner = new FlowScanner.DepthFirstScanner(); + matches = depthFirstScanner.findAllMatches(b.getExecution().getCurrentHeads(), null, matchEchoStep); + + // Nodes all covered + Assert.assertEquals(3, matches.size()); + } + /** And the parallel case */ @Test - public void testme() throws Exception { + public void testParallelScan() throws Exception { WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "Convoluted"); job.setDefinition(new 
CpsFlowDefinition( - "echo 'pre-stage command'\n" + - "sleep 1\n" + - "stage 'first'\n" + - "echo 'I ran'\n" + - "stage 'second'\n" + - "node {\n" + - " def steps = [:]\n" + - " steps['2a-dir'] = {\n" + - " echo 'do 2a stuff'\n" + - " echo 'do more 2a stuff'\n" + - " timeout(time: 10, unit: 'SECONDS') {\n" + - " stage 'invalid'\n" + - " echo 'time seconds'\n" + - " }\n" + - " sleep 15\n" + - " }\n" + - " steps['2b'] = {\n" + - " echo 'do 2b stuff'\n" + - " sleep 10\n" + - " echo 'echo_label_me'\n" + - " }\n" + - " parallel steps\n" + - "}\n" + - "\n" + - "stage 'final'\n" + - "echo 'ran final 1'\n" + - "echo 'ran final 2'" + "echo 'first'\n" + + "def steps = [:]\n" + + "steps['1'] = {\n" + + " echo 'do 1 stuff'\n" + + "}\n" + + "steps['2'] = {\n" + + " echo '2a'\n" + + " echo '2b'\n" + + "}\n" + + "parallel steps\n" + + "echo 'final'" )); WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); - FlowGraphWalker walker = new FlowGraphWalker(); - walker.addHeads(b.getExecution().getCurrentHeads()); + Collection heads = b.getExecution().getCurrentHeads(); + Predicate matchEchoStep = FlowScanner.predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); + + FlowScanner.ScanAlgorithm scanner = new FlowScanner.LinearScanner(); + Collection matches = scanner.findAllMatches(heads, null, matchEchoStep); + Assert.assertTrue(matches.size() >= 3 && matches.size() <= 4); + + scanner = new FlowScanner.DepthFirstScanner(); + matches = scanner.findAllMatches(heads, null, matchEchoStep); + Assert.assertTrue(matches.size() == 5); + + scanner = new FlowScanner.BlockHoppingScanner(); + matches = scanner.findAllMatches(heads, null, matchEchoStep); + Assert.assertTrue(matches.size() == 2); } + } \ No newline at end of file From 4c8faa47f10fd99f2a3ea875e496e80f3a3de1ea Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 19 Apr 2016 05:36:50 -0400 Subject: [PATCH 007/104] Add visitor method to FlowScanner for collecting stats while walking the graph --- 
.../plugins/workflow/graph/FlowScanner.java | 28 +++++++++++++++++++ .../workflow/graph/TestFlowScanner.java | 26 ++++++++++++++++- 2 files changed, 53 insertions(+), 1 deletion(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java index 86f4468c..1f366aec 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java @@ -25,6 +25,7 @@ */ import com.google.common.base.Predicate; +import com.sun.tools.javac.comp.Flow; import hudson.model.Action; import org.jenkinsci.plugins.workflow.actions.ErrorAction; import org.jenkinsci.plugins.workflow.actions.LabelAction; @@ -91,6 +92,15 @@ public boolean apply(FlowNode input) { return outputPredicate; } + public interface FlowNodeVisitor { + /** + * Visit the flow node, and indicate if we should continue analysis + * @param f Node to visit + * @return False if node is done + */ + public boolean visit(@Nonnull FlowNode f); + } + /** Interface to be used for scanning/analyzing FlowGraphs with support for different visit orders */ public interface ScanAlgorithm { @@ -114,6 +124,9 @@ public interface ScanAlgorithm { */ @Nonnull public Collection findAllMatches(@CheckForNull Collection heads, @CheckForNull Collection stopNodes, @Nonnull Predicate matchPredicate); + + /** Used for extracting metrics from the flow graph */ + public void visitAll(@CheckForNull Collection heads, FlowNodeVisitor visitor); } /** @@ -229,6 +242,21 @@ public List findAllMatches(@CheckForNull Collection heads, } return nodes; } + + /** Used for extracting metrics from the flow graph */ + public void visitAll(@CheckForNull Collection heads, FlowNodeVisitor visitor) { + if (heads == null || heads.size() == 0) { + return; + } + initialize(); + this.setHeads(heads); + Collection endNodes = Collections.EMPTY_SET; + + boolean continueAnalysis = true; + while 
(continueAnalysis && (_current = next(endNodes)) != null) { + continueAnalysis = visitor.visit(_current); + } + } } /** Does a simple and efficient depth-first search */ diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java index 094781ec..b2dce9d4 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java @@ -37,6 +37,7 @@ import org.jvnet.hudson.test.BuildWatcher; import org.jvnet.hudson.test.JenkinsRule; +import javax.annotation.Nonnull; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -49,6 +50,24 @@ public class TestFlowScanner { @Rule public JenkinsRule r = new JenkinsRule(); + static final class CollectingVisitor implements FlowScanner.FlowNodeVisitor { + ArrayList visited = new ArrayList(); + + @Override + public boolean visit(@Nonnull FlowNode f) { + visited.add(f); + return true; + } + + public void reset() { + this.visited.clear(); + } + + public ArrayList getVisited() { + return visited; + } + }; + /** Tests the basic scan algorithm, predicate use, start/stop nodes */ @Test public void testSimpleScan() throws Exception { @@ -98,11 +117,16 @@ public void testSimpleScan() throws Exception { Assert.assertEquals(0, nodeList.size()); } + + CollectingVisitor vis = new CollectingVisitor(); // Verify we touch head and foot nodes too for (FlowScanner.ScanAlgorithm sa : scans) { - System.out.println("Testing class: "+sa.getClass()); + System.out.println("Testing class: " + sa.getClass()); Collection nodeList = sa.findAllMatches(heads, null, (Predicate)Predicates.alwaysTrue()); + vis.reset(); + sa.visitAll(heads, vis); Assert.assertEquals(5, nodeList.size()); + Assert.assertEquals(5, vis.getVisited().size()); } // Test with a stop node given, sometimes no matches From eaf47694aa3d488efe4c6ec23e27b03817b0a7bd Mon Sep 17 
00:00:00 2001 From: Sam Van Oort Date: Tue, 19 Apr 2016 05:58:27 -0400 Subject: [PATCH 008/104] Fix FindBugs complaints --- .../plugins/workflow/graph/FlowScanner.java | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java index 1f366aec..b6eba1a3 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java @@ -25,7 +25,6 @@ */ import com.google.common.base.Predicate; -import com.sun.tools.javac.comp.Flow; import hudson.model.Action; import org.jenkinsci.plugins.workflow.actions.ErrorAction; import org.jenkinsci.plugins.workflow.actions.LabelAction; @@ -33,6 +32,7 @@ import org.jenkinsci.plugins.workflow.actions.StageAction; import org.jenkinsci.plugins.workflow.actions.WorkspaceAction; import org.jenkinsci.plugins.workflow.cps.nodes.StepAtomNode; +import org.jenkinsci.plugins.workflow.steps.StepDescriptor; import javax.annotation.CheckForNull; import javax.annotation.Nonnull; @@ -41,7 +41,6 @@ import java.util.Collection; import java.util.Collections; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Set; @@ -78,13 +77,14 @@ public boolean apply(FlowNode input) { static final Predicate MATCH_HAS_ERROR = createPredicateWhereActionExists(ErrorAction.class); static final Predicate MATCH_HAS_LOG = createPredicateWhereActionExists(LogAction.class); - public static Predicate predicateMatchStepDescriptor(final String descriptorId) { + public static Predicate predicateMatchStepDescriptor(@Nonnull final String descriptorId) { Predicate outputPredicate = new Predicate() { @Override public boolean apply(FlowNode input) { if (input instanceof StepAtomNode) { StepAtomNode san = (StepAtomNode)input; - return descriptorId.equals(san.getDescriptor().getId()); + StepDescriptor sd = 
san.getDescriptor(); + return sd != null && descriptorId.equals(sd.getId()); } return false; } @@ -226,7 +226,7 @@ public List findAllMatches(@CheckForNull Collection heads, @CheckForNull Collection endNodes, Predicate matchCondition) { if (heads == null || heads.size() == 0) { - return null; + return Collections.EMPTY_LIST; } initialize(); Collection fastEndNodes = convertToFastCheckable(endNodes); @@ -288,7 +288,7 @@ protected FlowNode next(@Nonnull Collection blackList) { if (parents != null) { for (FlowNode f : parents) { if (!blackList.contains(f) && !_visited.contains(f)) { - if (output != null ) { + if (output == null ) { output = f; } else { _queue.push(f); @@ -334,7 +334,7 @@ protected FlowNode next(@Nonnull Collection blackList) { return _current; } List parents = _current.getParents(); - if (parents != null || parents.size() > 0) { + if (parents != null && parents.size() > 0) { for (FlowNode f : parents) { if (!blackList.contains(f)) { return f; @@ -365,7 +365,7 @@ protected FlowNode next(@Nonnull Collection blackList) { return _current; } List parents = _current.getParents(); - if (parents != null || parents.size() > 0) { + if (parents != null && parents.size() > 0) { for (FlowNode f : parents) { if (!blackList.contains(f)) { FlowNode jumped = jumpBlock(f); From 0ed42f803786569e57c19e6f3215ffcd0e913c41 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 19 Apr 2016 05:58:46 -0400 Subject: [PATCH 009/104] Flesh out incremental analysis cache skeleton --- .../graph/IncrementalFlowAnalysis.java | 34 ++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/IncrementalFlowAnalysis.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/IncrementalFlowAnalysis.java index 56eaff55..7c20820e 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graph/IncrementalFlowAnalysis.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graph/IncrementalFlowAnalysis.java @@ -4,6 +4,7 @@ 
import com.google.common.base.Predicate; import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import org.jenkinsci.plugins.workflow.flow.FlowExecution; import javax.annotation.CheckForNull; @@ -80,5 +81,36 @@ protected void update(@Nonnull FlowExecution exec) { } } - static Cache analysisCache = CacheBuilder.newBuilder().initialCapacity(100).build(); + public static class IncrementalAnalysisCache { + Function analysisFunction; + Predicate matchCondition; + Cache> analysisCache = CacheBuilder.newBuilder().initialCapacity(100).build(); + + public T getAnalysisValue(@CheckForNull FlowExecution f) { + if (f != null) { + String url; + try { + url = f.getUrl(); + } catch (IOException ioe) { + throw new IllegalStateException(ioe); + } + IncrementalAnalysis analysis = analysisCache.getIfPresent(url); + if (analysis != null) { + return analysis.getUpdatedValue(f); + } else { + IncrementalAnalysis newAnalysis = new IncrementalAnalysis(matchCondition, analysisFunction); + T value = newAnalysis.getUpdatedValue(f); + analysisCache.put(url, newAnalysis); + return value; + } + } + + return null; + } + + public IncrementalAnalysisCache(Predicate matchCondition, Function analysisFunction) { + this.matchCondition = matchCondition; + this.analysisFunction = analysisFunction; + } + } } From 7f4d4882ff14bcca93e4f17093f148f1a469eeca Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 19 Apr 2016 06:02:07 -0400 Subject: [PATCH 010/104] Move FlowScanner method to search by StepDescriptor into test only to avoid a cyclic plugin dependency --- pom.xml | 2 +- .../plugins/workflow/graph/FlowScanner.java | 17 -------------- .../graph/IncrementalFlowAnalysis.java | 1 - .../workflow/graph/TestFlowScanner.java | 23 ++++++++++++++++--- 4 files changed, 21 insertions(+), 22 deletions(-) diff --git a/pom.xml b/pom.xml index 5fa4333b..41db0596 100644 --- a/pom.xml +++ b/pom.xml @@ -77,10 +77,10 @@ test - 
${project.groupId} workflow-cps 2.2-SNAPSHOT + test ${project.groupId} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java index b6eba1a3..1d9c0018 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java @@ -31,8 +31,6 @@ import org.jenkinsci.plugins.workflow.actions.LogAction; import org.jenkinsci.plugins.workflow.actions.StageAction; import org.jenkinsci.plugins.workflow.actions.WorkspaceAction; -import org.jenkinsci.plugins.workflow.cps.nodes.StepAtomNode; -import org.jenkinsci.plugins.workflow.steps.StepDescriptor; import javax.annotation.CheckForNull; import javax.annotation.Nonnull; @@ -77,21 +75,6 @@ public boolean apply(FlowNode input) { static final Predicate MATCH_HAS_ERROR = createPredicateWhereActionExists(ErrorAction.class); static final Predicate MATCH_HAS_LOG = createPredicateWhereActionExists(LogAction.class); - public static Predicate predicateMatchStepDescriptor(@Nonnull final String descriptorId) { - Predicate outputPredicate = new Predicate() { - @Override - public boolean apply(FlowNode input) { - if (input instanceof StepAtomNode) { - StepAtomNode san = (StepAtomNode)input; - StepDescriptor sd = san.getDescriptor(); - return sd != null && descriptorId.equals(sd.getId()); - } - return false; - } - }; - return outputPredicate; - } - public interface FlowNodeVisitor { /** * Visit the flow node, and indicate if we should continue analysis diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/IncrementalFlowAnalysis.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/IncrementalFlowAnalysis.java index 7c20820e..5a0b4190 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graph/IncrementalFlowAnalysis.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graph/IncrementalFlowAnalysis.java @@ -4,7 +4,6 @@ import 
com.google.common.base.Predicate; import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import org.jenkinsci.plugins.workflow.flow.FlowExecution; import javax.annotation.CheckForNull; diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java index b2dce9d4..79898d3f 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java @@ -27,9 +27,11 @@ import com.google.common.base.Predicate; import com.google.common.base.Predicates; import org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition; +import org.jenkinsci.plugins.workflow.cps.nodes.StepAtomNode; import org.jenkinsci.plugins.workflow.flow.FlowExecution; import org.jenkinsci.plugins.workflow.job.WorkflowJob; import org.jenkinsci.plugins.workflow.job.WorkflowRun; +import org.jenkinsci.plugins.workflow.steps.StepDescriptor; import org.junit.Assert; import org.junit.ClassRule; import org.junit.Rule; @@ -50,6 +52,21 @@ public class TestFlowScanner { @Rule public JenkinsRule r = new JenkinsRule(); + public static Predicate predicateMatchStepDescriptor(@Nonnull final String descriptorId) { + Predicate outputPredicate = new Predicate() { + @Override + public boolean apply(FlowNode input) { + if (input instanceof StepAtomNode) { + StepAtomNode san = (StepAtomNode)input; + StepDescriptor sd = san.getDescriptor(); + return sd != null && descriptorId.equals(sd.getId()); + } + return false; + } + }; + return outputPredicate; + } + static final class CollectingVisitor implements FlowScanner.FlowNodeVisitor { ArrayList visited = new ArrayList(); @@ -84,7 +101,7 @@ public void testSimpleScan() throws Exception { new FlowScanner.BlockHoppingScanner() }; - Predicate echoPredicate = 
FlowScanner.predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); + Predicate echoPredicate = predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); List heads = exec.getCurrentHeads(); // Test expected scans with no stop nodes given (different ways of specifying none) @@ -163,7 +180,7 @@ public void testBlockScan() throws Exception { "sleep 1" )); WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); - Predicate matchEchoStep = FlowScanner.predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); + Predicate matchEchoStep = predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); // Test blockhopping FlowScanner.BlockHoppingScanner blockHoppingScanner = new FlowScanner.BlockHoppingScanner(); @@ -198,7 +215,7 @@ public void testParallelScan() throws Exception { )); WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); Collection heads = b.getExecution().getCurrentHeads(); - Predicate matchEchoStep = FlowScanner.predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); + Predicate matchEchoStep = predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); FlowScanner.ScanAlgorithm scanner = new FlowScanner.LinearScanner(); Collection matches = scanner.findAllMatches(heads, null, matchEchoStep); From deb585b03a23b444da1f6484195c98f589b004f0 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Mon, 25 Apr 2016 17:43:54 -0400 Subject: [PATCH 011/104] Rename findAllMatches to filter --- .../plugins/workflow/graph/FlowScanner.java | 10 ++++---- .../workflow/graph/TestFlowScanner.java | 24 +++++++++---------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java index 1d9c0018..a7676dcf 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java +++ 
b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java @@ -106,7 +106,7 @@ public interface ScanAlgorithm { * @return All nodes matching condition */ @Nonnull - public Collection findAllMatches(@CheckForNull Collection heads, @CheckForNull Collection stopNodes, @Nonnull Predicate matchPredicate); + public Collection filter(@CheckForNull Collection heads, @CheckForNull Collection stopNodes, @Nonnull Predicate matchPredicate); /** Used for extracting metrics from the flow graph */ public void visitAll(@CheckForNull Collection heads, FlowNodeVisitor visitor); @@ -179,7 +179,7 @@ public FlowNode findFirstMatch(@CheckForNull Collection heads, @Nonnul @Nonnull public Collection findAllMatches(@CheckForNull Collection heads, @Nonnull Predicate matchPredicate) { - return this.findAllMatches(heads, null, matchPredicate); + return this.filter(heads, null, matchPredicate); } // Basic algo impl @@ -205,9 +205,9 @@ public FlowNode findFirstMatch(@CheckForNull Collection heads, } // Basic algo impl - public List findAllMatches(@CheckForNull Collection heads, - @CheckForNull Collection endNodes, - Predicate matchCondition) { + public List filter(@CheckForNull Collection heads, + @CheckForNull Collection endNodes, + Predicate matchCondition) { if (heads == null || heads.size() == 0) { return Collections.EMPTY_LIST; } diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java index 79898d3f..3983664c 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java @@ -114,12 +114,12 @@ public void testSimpleScan() throws Exception { node = sa.findFirstMatch(heads, Collections.EMPTY_SET, echoPredicate); Assert.assertEquals(exec.getNode("5"), node); - Collection nodeList = sa.findAllMatches(heads, null, echoPredicate); + Collection nodeList = sa.filter(heads, null, 
echoPredicate); FlowNode[] expected = new FlowNode[]{exec.getNode("5"), exec.getNode("4")}; Assert.assertArrayEquals(expected, nodeList.toArray()); - nodeList = sa.findAllMatches(heads, Collections.EMPTY_LIST, echoPredicate); + nodeList = sa.filter(heads, Collections.EMPTY_LIST, echoPredicate); Assert.assertArrayEquals(expected, nodeList.toArray()); - nodeList = sa.findAllMatches(heads, Collections.EMPTY_SET, echoPredicate); + nodeList = sa.filter(heads, Collections.EMPTY_SET, echoPredicate); Assert.assertArrayEquals(expected, nodeList.toArray()); } @@ -129,7 +129,7 @@ public void testSimpleScan() throws Exception { FlowNode node = sa.findFirstMatch(heads, null, (Predicate)Predicates.alwaysFalse()); Assert.assertNull(node); - Collection nodeList = sa.findAllMatches(heads, null, (Predicate)Predicates.alwaysFalse()); + Collection nodeList = sa.filter(heads, null, (Predicate) Predicates.alwaysFalse()); Assert.assertNotNull(nodeList); Assert.assertEquals(0, nodeList.size()); } @@ -139,7 +139,7 @@ public void testSimpleScan() throws Exception { // Verify we touch head and foot nodes too for (FlowScanner.ScanAlgorithm sa : scans) { System.out.println("Testing class: " + sa.getClass()); - Collection nodeList = sa.findAllMatches(heads, null, (Predicate)Predicates.alwaysTrue()); + Collection nodeList = sa.filter(heads, null, (Predicate) Predicates.alwaysTrue()); vis.reset(); sa.visitAll(heads, vis); Assert.assertEquals(5, nodeList.size()); @@ -153,14 +153,14 @@ public void testSimpleScan() throws Exception { FlowNode node = sa.findFirstMatch(heads, noMatchEndNode, echoPredicate); Assert.assertNull(node); - Collection nodeList = sa.findAllMatches(heads, noMatchEndNode, echoPredicate); + Collection nodeList = sa.filter(heads, noMatchEndNode, echoPredicate); Assert.assertNotNull(nodeList); Assert.assertEquals(0, nodeList.size()); // Now we try with a stop list the reduces node set for multiple matches node = sa.findFirstMatch(heads, singleMatchEndNode, echoPredicate); 
Assert.assertEquals(exec.getNode("5"), node); - nodeList = sa.findAllMatches(heads, singleMatchEndNode, echoPredicate); + nodeList = sa.filter(heads, singleMatchEndNode, echoPredicate); Assert.assertNotNull(nodeList); Assert.assertEquals(1, nodeList.size()); Assert.assertEquals(exec.getNode("5"), nodeList.iterator().next()); @@ -184,13 +184,13 @@ public void testBlockScan() throws Exception { // Test blockhopping FlowScanner.BlockHoppingScanner blockHoppingScanner = new FlowScanner.BlockHoppingScanner(); - Collection matches = blockHoppingScanner.findAllMatches(b.getExecution().getCurrentHeads(), null, matchEchoStep); + Collection matches = blockHoppingScanner.filter(b.getExecution().getCurrentHeads(), null, matchEchoStep); // This means we jumped the blocks Assert.assertEquals(1, matches.size()); FlowScanner.DepthFirstScanner depthFirstScanner = new FlowScanner.DepthFirstScanner(); - matches = depthFirstScanner.findAllMatches(b.getExecution().getCurrentHeads(), null, matchEchoStep); + matches = depthFirstScanner.filter(b.getExecution().getCurrentHeads(), null, matchEchoStep); // Nodes all covered Assert.assertEquals(3, matches.size()); @@ -218,15 +218,15 @@ public void testParallelScan() throws Exception { Predicate matchEchoStep = predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); FlowScanner.ScanAlgorithm scanner = new FlowScanner.LinearScanner(); - Collection matches = scanner.findAllMatches(heads, null, matchEchoStep); + Collection matches = scanner.filter(heads, null, matchEchoStep); Assert.assertTrue(matches.size() >= 3 && matches.size() <= 4); scanner = new FlowScanner.DepthFirstScanner(); - matches = scanner.findAllMatches(heads, null, matchEchoStep); + matches = scanner.filter(heads, null, matchEchoStep); Assert.assertTrue(matches.size() == 5); scanner = new FlowScanner.BlockHoppingScanner(); - matches = scanner.findAllMatches(heads, null, matchEchoStep); + matches = scanner.filter(heads, null, matchEchoStep); 
Assert.assertTrue(matches.size() == 2); } From 72e49df68ac14865c79056cdd4ccbd5cd0fa6b7e Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Mon, 25 Apr 2016 18:15:03 -0400 Subject: [PATCH 012/104] Refactor flow graph incremental analysis, add stub of a test until I can figure out how to get it to use something like semaphore step --- ...java => IncrementalFlowAnalysisCache.java} | 86 ++++++++++++------- .../graph/TestIncrementalFlowAnalysis.java | 85 ++++++++++++++++++ 2 files changed, 142 insertions(+), 29 deletions(-) rename src/main/java/org/jenkinsci/plugins/workflow/graph/{IncrementalFlowAnalysis.java => IncrementalFlowAnalysisCache.java} (53%) create mode 100644 src/test/java/org/jenkinsci/plugins/workflow/graph/TestIncrementalFlowAnalysis.java diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/IncrementalFlowAnalysis.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/IncrementalFlowAnalysisCache.java similarity index 53% rename from src/main/java/org/jenkinsci/plugins/workflow/graph/IncrementalFlowAnalysis.java rename to src/main/java/org/jenkinsci/plugins/workflow/graph/IncrementalFlowAnalysisCache.java index 5a0b4190..f0f1c6e5 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graph/IncrementalFlowAnalysis.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graph/IncrementalFlowAnalysisCache.java @@ -1,3 +1,27 @@ +/* + * The MIT License + * + * Copyright (c) 2016, CloudBees, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + package org.jenkinsci.plugins.workflow.graph; import com.google.common.base.Function; @@ -16,7 +40,11 @@ * Provides incremental analysis of flow graphs, where updates are on the head * @author Sam Van Oort */ -public class IncrementalFlowAnalysis { +public class IncrementalFlowAnalysisCache { + + Function analysisFunction; + Predicate matchCondition; + Cache> analysisCache = CacheBuilder.newBuilder().initialCapacity(100).build(); protected static class IncrementalAnalysis { protected List lastHeadIds = new ArrayList(); @@ -80,36 +108,36 @@ protected void update(@Nonnull FlowExecution exec) { } } - public static class IncrementalAnalysisCache { - Function analysisFunction; - Predicate matchCondition; - Cache> analysisCache = CacheBuilder.newBuilder().initialCapacity(100).build(); - - public T getAnalysisValue(@CheckForNull FlowExecution f) { - if (f != null) { - String url; - try { - url = f.getUrl(); - } catch (IOException ioe) { - throw new IllegalStateException(ioe); - } - IncrementalAnalysis analysis = analysisCache.getIfPresent(url); - if (analysis != null) { - return analysis.getUpdatedValue(f); - } else { - IncrementalAnalysis newAnalysis = new IncrementalAnalysis(matchCondition, analysisFunction); - T value = newAnalysis.getUpdatedValue(f); - analysisCache.put(url, newAnalysis); - return value; - } + public T getAnalysisValue(@CheckForNull FlowExecution f) { + if (f != null) { + String url; + try { + url = f.getUrl(); + } catch (IOException ioe) { + throw new IllegalStateException(ioe); + } + IncrementalAnalysis analysis = analysisCache.getIfPresent(url); + if (analysis != null) { + return analysis.getUpdatedValue(f); + } else { + IncrementalAnalysis newAnalysis = new IncrementalAnalysis(matchCondition, analysisFunction); + T value = newAnalysis.getUpdatedValue(f); + analysisCache.put(url, newAnalysis); + return value; } - - return null; } - public IncrementalAnalysisCache(Predicate matchCondition, Function analysisFunction) { - this.matchCondition = 
matchCondition; - this.analysisFunction = analysisFunction; - } + return null; + } + + public IncrementalFlowAnalysisCache(Predicate matchCondition, Function analysisFunction) { + this.matchCondition = matchCondition; + this.analysisFunction = analysisFunction; + } + + public IncrementalFlowAnalysisCache(Predicate matchCondition, Function analysisFunction, Cache myCache) { + this.matchCondition = matchCondition; + this.analysisFunction = analysisFunction; + this.analysisCache = myCache; } } diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestIncrementalFlowAnalysis.java b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestIncrementalFlowAnalysis.java new file mode 100644 index 00000000..aef6864a --- /dev/null +++ b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestIncrementalFlowAnalysis.java @@ -0,0 +1,85 @@ +/* + * The MIT License + * + * Copyright (c) 2016, CloudBees, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +package org.jenkinsci.plugins.workflow.graph; + +import com.google.common.base.Function; +import com.google.common.base.Predicate; +import org.jenkinsci.plugins.workflow.actions.LabelAction; +import org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition +import org.jenkinsci.plugins.workflow.job.WorkflowJob; +import org.jenkinsci.plugins.workflow.job.WorkflowRun; +import org.junit.Assert; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.jvnet.hudson.test.BuildWatcher; +import org.jvnet.hudson.test.JenkinsRule; +import org.jenkinsci.plugins.workflow.test.steps.SemaphoreStep; + +import java.util.Collection; + +/** + * @author svanoort + */ +public class TestIncrementalFlowAnalysis { + @ClassRule + public static BuildWatcher buildWatcher = new BuildWatcher(); + + @Rule + public JenkinsRule r = new JenkinsRule(); + + /** Tests the basic incremental analysis */ + @Test + public void testIncrementalAnalysis() throws Exception { + WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "Convoluted"); + job.setDefinition(new CpsFlowDefinition( + "for (int i=0; i<4; i++) {\n" + + " stage \"stage-$i\"\n" + + " echo \"Doing $i\"\n" + + " semaphore 'wait'\n" + + "}" + )); + + // Search conditions + Predicate labelledNode = FlowScanner.createPredicateWhereActionExists(LabelAction.class); + Function getLabelFunction = new Function() { + @Override + public String apply(FlowNode input) { + LabelAction labelled = input.getAction(LabelAction.class); + return (labelled != null) ? 
labelled.getDisplayName() : null; + } + }; + + IncrementalFlowAnalysisCache incrementalAnalysis = new IncrementalFlowAnalysisCache(labelledNode, getLabelFunction); + + // TODO how the devil do I test this, when SemaphoreStep is part of another repo's test classes? + } + + /** Tests analysis where there are multiple heads (parallel excecution blocks) */ + @Test + public void testIncrementalAnalysisParallel() throws Exception { + // TODO figure out a case where this is actually a thing? + } +} From eb485520d01816854c7051d0866a833a179e6e9b Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Mon, 25 Apr 2016 18:16:12 -0400 Subject: [PATCH 013/104] Fix imports in incremental flow analysis --- .../plugins/workflow/graph/TestIncrementalFlowAnalysis.java | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestIncrementalFlowAnalysis.java b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestIncrementalFlowAnalysis.java index aef6864a..716d444a 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestIncrementalFlowAnalysis.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestIncrementalFlowAnalysis.java @@ -27,18 +27,14 @@ import com.google.common.base.Function; import com.google.common.base.Predicate; import org.jenkinsci.plugins.workflow.actions.LabelAction; -import org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition +import org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition; import org.jenkinsci.plugins.workflow.job.WorkflowJob; -import org.jenkinsci.plugins.workflow.job.WorkflowRun; -import org.junit.Assert; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.jvnet.hudson.test.BuildWatcher; import org.jvnet.hudson.test.JenkinsRule; -import org.jenkinsci.plugins.workflow.test.steps.SemaphoreStep; -import java.util.Collection; /** * @author svanoort From e640fd33a333e12357b52d7e4e31aa0197a54103 Mon Sep 17 00:00:00 2001 From: Sam Van Oort 
Date: Tue, 26 Apr 2016 15:05:39 -0400 Subject: [PATCH 014/104] Refactor the incremental flow analysis to allow testing by running incrementally --- .../graph/IncrementalFlowAnalysisCache.java | 81 ++++++++++++++----- .../graph/TestIncrementalFlowAnalysis.java | 14 ++-- 2 files changed, 67 insertions(+), 28 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/IncrementalFlowAnalysisCache.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/IncrementalFlowAnalysisCache.java index f0f1c6e5..6742dc76 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graph/IncrementalFlowAnalysisCache.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graph/IncrementalFlowAnalysisCache.java @@ -37,7 +37,11 @@ import java.util.List; /** - * Provides incremental analysis of flow graphs, where updates are on the head + * Provides an efficient way to find the most recent (closest to head) node matching a condition, and get info about it + * + * This is useful in cases where we are watching an in-progress pipeline execution. + * It uses caching and only looks at new nodes (the delta since last execution). + * @TODO Thread safety? 
* @author Sam Van Oort */ public class IncrementalFlowAnalysisCache { @@ -47,7 +51,7 @@ public class IncrementalFlowAnalysisCache { Cache> analysisCache = CacheBuilder.newBuilder().initialCapacity(100).build(); protected static class IncrementalAnalysis { - protected List lastHeadIds = new ArrayList(); + protected List lastHeadIds = new ArrayList(); // We don't want to hold refs to the actual nodes protected T lastValue; /** Gets value from a flownode */ @@ -73,32 +77,55 @@ public T getUpdatedValue(@CheckForNull FlowExecution exec) { return null; } List heads = exec.getCurrentHeads(); - if (heads != null && heads.size() == lastHeadIds.size()) { - boolean useCache = false; + if (heads == null || heads.size() == 0) { + return null; + } + return getUpdatedValueInternal(exec, heads); + } + + @CheckForNull + public T getUpdatedValue(@CheckForNull FlowExecution exec, @Nonnull List heads) { + if (exec == null || heads.size() == 0) { + return null; + } + return getUpdatedValueInternal(exec, heads); + } + + /** + * Internal implementation + * @param exec Execution, used in obtaining node instances + * @param heads Heads to scan from, cannot be empty + * @return Updated value or null if not present + */ + @CheckForNull + protected T getUpdatedValueInternal(@Nonnull FlowExecution exec, @Nonnull List heads) { + boolean hasChanged = heads.size() == lastHeadIds.size(); + if (hasChanged) { for (FlowNode f : heads) { - if (lastHeadIds.contains(f.getId())) { - useCache = true; + if (!lastHeadIds.contains(f.getId())) { + hasChanged = false; break; } } - if (!useCache) { - update(exec); - } - return lastValue; } - return null; + if (!hasChanged) { + updateInternal(exec, heads); + } + return lastValue; } - protected void update(@Nonnull FlowExecution exec) { - ArrayList nodes = new ArrayList(); + // FlowExecution is used for look + protected void updateInternal(@Nonnull FlowExecution exec, @Nonnull List heads) { + ArrayList stopNodes = new ArrayList(); + // Fetch the actual flow nodes 
to use as halt conditions for (String nodeId : this.lastHeadIds) { try { - nodes.add(exec.getNode(nodeId)); + stopNodes.add(exec.getNode(nodeId)); } catch (IOException ioe) { throw new IllegalStateException(ioe); } } - FlowNode matchNode = new FlowScanner.BlockHoppingScanner().findFirstMatch(exec.getCurrentHeads(), nodes, this.nodeMatchCondition); + FlowNode matchNode = new FlowScanner.BlockHoppingScanner().findFirstMatch(heads, stopNodes, this.nodeMatchCondition); this.lastValue = this.valueExtractor.apply(matchNode); this.lastHeadIds.clear(); @@ -108,25 +135,39 @@ protected void update(@Nonnull FlowExecution exec) { } } + /** + * Get the latest value, using the heads of a FlowExecutions + * @param f Flow executions + * @return Analysis value, or null no nodes match condition/flow has not begun + */ + @CheckForNull public T getAnalysisValue(@CheckForNull FlowExecution f) { - if (f != null) { + if (f == null) { + return null; + } else { + return getAnalysisValue(f, f.getCurrentHeads()); + } + } + + @CheckForNull + public T getAnalysisValue(@CheckForNull FlowExecution exec, @CheckForNull List heads) { + if (exec != null && heads == null && heads.size() != 0) { String url; try { - url = f.getUrl(); + url = exec.getUrl(); } catch (IOException ioe) { throw new IllegalStateException(ioe); } IncrementalAnalysis analysis = analysisCache.getIfPresent(url); if (analysis != null) { - return analysis.getUpdatedValue(f); + return analysis.getUpdatedValue(exec, heads); } else { IncrementalAnalysis newAnalysis = new IncrementalAnalysis(matchCondition, analysisFunction); - T value = newAnalysis.getUpdatedValue(f); + T value = newAnalysis.getUpdatedValue(exec, heads); analysisCache.put(url, newAnalysis); return value; } } - return null; } diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestIncrementalFlowAnalysis.java b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestIncrementalFlowAnalysis.java index 716d444a..5e1c87ee 100644 --- 
a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestIncrementalFlowAnalysis.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestIncrementalFlowAnalysis.java @@ -28,7 +28,9 @@ import com.google.common.base.Predicate; import org.jenkinsci.plugins.workflow.actions.LabelAction; import org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition; +import org.jenkinsci.plugins.workflow.flow.FlowExecution; import org.jenkinsci.plugins.workflow.job.WorkflowJob; +import org.jenkinsci.plugins.workflow.job.WorkflowRun; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; @@ -54,7 +56,6 @@ public void testIncrementalAnalysis() throws Exception { "for (int i=0; i<4; i++) {\n" + " stage \"stage-$i\"\n" + " echo \"Doing $i\"\n" + - " semaphore 'wait'\n" + "}" )); @@ -69,13 +70,10 @@ public String apply(FlowNode input) { }; IncrementalFlowAnalysisCache incrementalAnalysis = new IncrementalFlowAnalysisCache(labelledNode, getLabelFunction); + WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); + FlowExecution exec = b.getExecution(); + FlowNode test = exec.getNode("4"); - // TODO how the devil do I test this, when SemaphoreStep is part of another repo's test classes? - } - - /** Tests analysis where there are multiple heads (parallel excecution blocks) */ - @Test - public void testIncrementalAnalysisParallel() throws Exception { - // TODO figure out a case where this is actually a thing? 
+ // TODO add tests based on calling incremental analysis from points further along flow, possible in some paralle cases } } From 7c1e6df4fae63b0c7c2de4f8e7bfb754dfa6e7b4 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 26 Apr 2016 15:06:46 -0400 Subject: [PATCH 015/104] Remove the incremental flow analysis so we can push it into a separate PR --- .../graph/IncrementalFlowAnalysisCache.java | 184 ------------------ .../graph/TestIncrementalFlowAnalysis.java | 79 -------- 2 files changed, 263 deletions(-) delete mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graph/IncrementalFlowAnalysisCache.java delete mode 100644 src/test/java/org/jenkinsci/plugins/workflow/graph/TestIncrementalFlowAnalysis.java diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/IncrementalFlowAnalysisCache.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/IncrementalFlowAnalysisCache.java deleted file mode 100644 index 6742dc76..00000000 --- a/src/main/java/org/jenkinsci/plugins/workflow/graph/IncrementalFlowAnalysisCache.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * The MIT License - * - * Copyright (c) 2016, CloudBees, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -package org.jenkinsci.plugins.workflow.graph; - -import com.google.common.base.Function; -import com.google.common.base.Predicate; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import org.jenkinsci.plugins.workflow.flow.FlowExecution; - -import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -/** - * Provides an efficient way to find the most recent (closest to head) node matching a condition, and get info about it - * - * This is useful in cases where we are watching an in-progress pipeline execution. - * It uses caching and only looks at new nodes (the delta since last execution). - * @TODO Thread safety? 
- * @author Sam Van Oort - */ -public class IncrementalFlowAnalysisCache { - - Function analysisFunction; - Predicate matchCondition; - Cache> analysisCache = CacheBuilder.newBuilder().initialCapacity(100).build(); - - protected static class IncrementalAnalysis { - protected List lastHeadIds = new ArrayList(); // We don't want to hold refs to the actual nodes - protected T lastValue; - - /** Gets value from a flownode */ - protected Function valueExtractor; - - protected Predicate nodeMatchCondition; - - public IncrementalAnalysis(@Nonnull Predicate nodeMatchCondition, @Nonnull Function valueExtractFunction){ - this.nodeMatchCondition = nodeMatchCondition; - this.valueExtractor = valueExtractFunction; - } - - /** - * Look up a value scanned from the flow - * If the heads haven't changed in the flow, return the current heads - * If they have, only hunt from the current value until the last one - * @param exec - * @return - */ - @CheckForNull - public T getUpdatedValue(@CheckForNull FlowExecution exec) { - if (exec == null) { - return null; - } - List heads = exec.getCurrentHeads(); - if (heads == null || heads.size() == 0) { - return null; - } - return getUpdatedValueInternal(exec, heads); - } - - @CheckForNull - public T getUpdatedValue(@CheckForNull FlowExecution exec, @Nonnull List heads) { - if (exec == null || heads.size() == 0) { - return null; - } - return getUpdatedValueInternal(exec, heads); - } - - /** - * Internal implementation - * @param exec Execution, used in obtaining node instances - * @param heads Heads to scan from, cannot be empty - * @return Updated value or null if not present - */ - @CheckForNull - protected T getUpdatedValueInternal(@Nonnull FlowExecution exec, @Nonnull List heads) { - boolean hasChanged = heads.size() == lastHeadIds.size(); - if (hasChanged) { - for (FlowNode f : heads) { - if (!lastHeadIds.contains(f.getId())) { - hasChanged = false; - break; - } - } - } - if (!hasChanged) { - updateInternal(exec, heads); - } - return 
lastValue; - } - - // FlowExecution is used for look - protected void updateInternal(@Nonnull FlowExecution exec, @Nonnull List heads) { - ArrayList stopNodes = new ArrayList(); - // Fetch the actual flow nodes to use as halt conditions - for (String nodeId : this.lastHeadIds) { - try { - stopNodes.add(exec.getNode(nodeId)); - } catch (IOException ioe) { - throw new IllegalStateException(ioe); - } - } - FlowNode matchNode = new FlowScanner.BlockHoppingScanner().findFirstMatch(heads, stopNodes, this.nodeMatchCondition); - this.lastValue = this.valueExtractor.apply(matchNode); - - this.lastHeadIds.clear(); - for (FlowNode f : exec.getCurrentHeads()) { - lastHeadIds.add(f.getId()); - } - } - } - - /** - * Get the latest value, using the heads of a FlowExecutions - * @param f Flow executions - * @return Analysis value, or null no nodes match condition/flow has not begun - */ - @CheckForNull - public T getAnalysisValue(@CheckForNull FlowExecution f) { - if (f == null) { - return null; - } else { - return getAnalysisValue(f, f.getCurrentHeads()); - } - } - - @CheckForNull - public T getAnalysisValue(@CheckForNull FlowExecution exec, @CheckForNull List heads) { - if (exec != null && heads == null && heads.size() != 0) { - String url; - try { - url = exec.getUrl(); - } catch (IOException ioe) { - throw new IllegalStateException(ioe); - } - IncrementalAnalysis analysis = analysisCache.getIfPresent(url); - if (analysis != null) { - return analysis.getUpdatedValue(exec, heads); - } else { - IncrementalAnalysis newAnalysis = new IncrementalAnalysis(matchCondition, analysisFunction); - T value = newAnalysis.getUpdatedValue(exec, heads); - analysisCache.put(url, newAnalysis); - return value; - } - } - return null; - } - - public IncrementalFlowAnalysisCache(Predicate matchCondition, Function analysisFunction) { - this.matchCondition = matchCondition; - this.analysisFunction = analysisFunction; - } - - public IncrementalFlowAnalysisCache(Predicate matchCondition, Function 
analysisFunction, Cache myCache) { - this.matchCondition = matchCondition; - this.analysisFunction = analysisFunction; - this.analysisCache = myCache; - } -} diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestIncrementalFlowAnalysis.java b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestIncrementalFlowAnalysis.java deleted file mode 100644 index 5e1c87ee..00000000 --- a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestIncrementalFlowAnalysis.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * The MIT License - * - * Copyright (c) 2016, CloudBees, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ - -package org.jenkinsci.plugins.workflow.graph; - -import com.google.common.base.Function; -import com.google.common.base.Predicate; -import org.jenkinsci.plugins.workflow.actions.LabelAction; -import org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition; -import org.jenkinsci.plugins.workflow.flow.FlowExecution; -import org.jenkinsci.plugins.workflow.job.WorkflowJob; -import org.jenkinsci.plugins.workflow.job.WorkflowRun; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.jvnet.hudson.test.BuildWatcher; -import org.jvnet.hudson.test.JenkinsRule; - - -/** - * @author svanoort - */ -public class TestIncrementalFlowAnalysis { - @ClassRule - public static BuildWatcher buildWatcher = new BuildWatcher(); - - @Rule - public JenkinsRule r = new JenkinsRule(); - - /** Tests the basic incremental analysis */ - @Test - public void testIncrementalAnalysis() throws Exception { - WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "Convoluted"); - job.setDefinition(new CpsFlowDefinition( - "for (int i=0; i<4; i++) {\n" + - " stage \"stage-$i\"\n" + - " echo \"Doing $i\"\n" + - "}" - )); - - // Search conditions - Predicate labelledNode = FlowScanner.createPredicateWhereActionExists(LabelAction.class); - Function getLabelFunction = new Function() { - @Override - public String apply(FlowNode input) { - LabelAction labelled = input.getAction(LabelAction.class); - return (labelled != null) ? 
labelled.getDisplayName() : null; - } - }; - - IncrementalFlowAnalysisCache incrementalAnalysis = new IncrementalFlowAnalysisCache(labelledNode, getLabelFunction); - WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); - FlowExecution exec = b.getExecution(); - FlowNode test = exec.getNode("4"); - - // TODO add tests based on calling incremental analysis from points further along flow, possible in some paralle cases - } -} From 6dfbb2d58f86e4f3327c51e9103a05c929c95156 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 26 Apr 2016 23:56:18 -0400 Subject: [PATCH 016/104] Start a ForkFlowScanner (WIP), add lots of Javadoc/comments, rename BlockHoppingScanner to indicate it is linear --- .../plugins/workflow/graph/FlowScanner.java | 157 ++++++++++++++++-- .../workflow/graph/TestFlowScanner.java | 8 +- 2 files changed, 150 insertions(+), 15 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java index a7676dcf..0506ef79 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java @@ -43,11 +43,10 @@ import java.util.Set; /** - * Generified algorithms for scanning flows for information + * Generified algorithms for scanning pipeline flow graphs for information * Supports a variety of algorithms for searching, and pluggable conditions - * Worth noting: predicates may be stateful here + * Worth noting: predicates may be stateful here, and may see some or all of the nodes, depending on the scan method used. 
* - * ANALYSIS method will * @author Sam Van Oort */ public class FlowScanner { @@ -90,7 +89,7 @@ public interface ScanAlgorithm { /** * Search for first node (walking from the heads through parents) that matches the condition - * @param heads Nodes to start searching from + * @param heads Nodes to start searching from, which may be filtered against blackList * @param stopNodes Search doesn't go beyond any of these nodes, null or empty will run to end of flow * @param matchPredicate Matching condition for search * @return First node matching condition, or null if none found @@ -100,7 +99,7 @@ public interface ScanAlgorithm { /** * Search for first node (walking from the heads through parents) that matches the condition - * @param heads Nodes to start searching from + * @param heads Nodes to start searching from, which may be filtered against a blackList * @param stopNodes Search doesn't go beyond any of these nodes, null or empty will run to end of flow * @param matchPredicate Matching condition for search * @return All nodes matching condition @@ -124,9 +123,10 @@ public static abstract class AbstractFlowScanner implements ScanAlgorithm { protected ArrayDeque _queue; protected FlowNode _current; - // Public APIs need to invoke this before searches + /** Public APIs need to invoke this before searches */ protected abstract void initialize(); + /** Add current head nodes to current processing set */ protected abstract void setHeads(@Nonnull Collection heads); /** @@ -166,6 +166,8 @@ protected static FlowNode linearScanUntil(@Nonnull FlowNode start, @Nonnull Pred protected Collection convertToFastCheckable(@CheckForNull Collection nodeCollection) { if (nodeCollection == null || nodeCollection.size()==0) { return Collections.EMPTY_SET; + } else if (nodeCollection.size() == 1) { + return Collections.singleton(nodeCollection.iterator().next()); } else if (nodeCollection instanceof Set) { return nodeCollection; } @@ -194,6 +196,9 @@ public FlowNode 
findFirstMatch(@CheckForNull Collection heads, Collection fastEndNodes = convertToFastCheckable(endNodes); Collection filteredHeads = new HashSet(heads); filteredHeads.removeAll(fastEndNodes); + if (filteredHeads.size() == 0) { + return null; + } this.setHeads(filteredHeads); while ((_current = next(fastEndNodes)) != null) { @@ -214,6 +219,9 @@ public List filter(@CheckForNull Collection heads, initialize(); Collection fastEndNodes = convertToFastCheckable(endNodes); Collection filteredHeads = new HashSet(heads); + if (filteredHeads.size() == 0) { + return Collections.EMPTY_LIST; + } filteredHeads.removeAll(fastEndNodes); this.setHeads(filteredHeads); ArrayList nodes = new ArrayList(); @@ -242,7 +250,9 @@ public void visitAll(@CheckForNull Collection heads, FlowNodeVisitor v } } - /** Does a simple and efficient depth-first search */ + /** Does a simple and efficient depth-first search: + * - This will visit each node exactly once, and walks through the first ancestry before revisiting parallel branches + */ public static class DepthFirstScanner extends AbstractFlowScanner { protected HashSet _visited = new HashSet(); @@ -259,13 +269,13 @@ protected void initialize() { @Override protected void setHeads(@Nonnull Collection heads) { - // Needs to handle blacklist _queue.addAll(heads); } @Override protected FlowNode next(@Nonnull Collection blackList) { FlowNode output = null; + // Walk through parents of current node if (_current != null) { List parents = _current.getParents(); if (parents != null) { @@ -284,13 +294,14 @@ protected FlowNode next(@Nonnull Collection blackList) { if (output == null && _queue.size() > 0) { output = _queue.pop(); } - _visited.add(output); + _visited.add(output); // No-op if null return output; } } /** * Scans through a single ancestry, does not cover parallel branches + * Use case: we don't care about parallel branches */ public static class LinearScanner extends AbstractFlowScanner { protected boolean isFirst = true; @@ -329,9 +340,11 
@@ protected FlowNode next(@Nonnull Collection blackList) { } /** - * Scanner that jumps over nested blocks + * LinearScanner that jumps over nested blocks + * Use case: finding information about enclosing blocks or preceding nodes + * - Ex: finding out the executor workspace used to run a flownode */ - public static class BlockHoppingScanner extends LinearScanner { + public static class LinearBlockHoppingScanner extends LinearScanner { protected FlowNode jumpBlock(FlowNode current) { return (current instanceof BlockEndNode) ? @@ -364,4 +377,126 @@ protected FlowNode next(@Nonnull Collection blackList) { return null; } } + + /** + * Scanner that will scan down forks when we hit parallel blocks. + * Think of it as the opposite reverse of {@link org.jenkinsci.plugins.workflow.graph.FlowScanner.DepthFirstScanner}: + * - We visit every node exactly once, but walk through all parallel forks before resuming the main flow + * + * This is optimal in many cases, since it need only keep minimal state information + * It is also very easy to make it branch/block-aware, since we have all the fork information at all times. + */ + public static class ForkScanner extends AbstractFlowScanner { + + /** These are the BlockStartNodes that begin parallel blocks + * There will be one entry for every executing parallel branch in current flow + */ + ArrayDeque forkStarts = new ArrayDeque(); + + /** FlowNode that will terminate the current parallel block */ + FlowNode currentParallelStart = null; + + /** How deep are we in parallel branches, if 0 we are linear */ + protected int parallelDepth = 0; + + @Override + protected void initialize() { + if (_queue == null) { + _queue = new ArrayDeque(); + } else { + _queue.clear(); + } + } + + @Override + protected void setHeads(@Nonnull Collection heads) { + // FIXME handle case where we have multiple heads - we need to do something special to handle the parallel branches + // Until they rejoin the head! 
+ _current = null; + _queue.addAll(heads); + } + + /** + * Invoked when we start entering a parallel block (walking from head of the flow, so we see the block end first) + * @param endNode + * @param heads + */ + protected void hitParallelEnd(BlockEndNode endNode, List heads, Collection blackList) { + int branchesAdded = 0; + BlockStartNode start = endNode.getStartNode(); + for (FlowNode f : heads) { + if (!blackList.contains(f)) { + if (branchesAdded == 0) { // We use references because it is more efficient + currentParallelStart = start; + } else { + forkStarts.push(start); + } + branchesAdded++; + } + } + if (branchesAdded > 0) { + parallelDepth++; + } + } + + /** + * Invoked when we complete parallel block, walking from the head (so encountered after the end) + * @param startNode StartNode for the block, + * @param parallelChild Parallel child node that is ending this + * @return FlowNode if we're the last node + */ + protected FlowNode hitParallelStart(FlowNode startNode, FlowNode parallelChild) { + FlowNode output = null; + if (forkStarts.size() > 0) { // More forks (or nested parallel forks) remain + FlowNode end = forkStarts.pop(); + if (end != currentParallelStart) { // Nested parallel branches, and we finished this fork + parallelDepth--; + output = currentParallelStart; + } + // TODO handle case where we do early exit because we encountered stop node + + // If the current end == currentParallelStart then we are finishing another branch of current flow + currentParallelStart = end; + } else { // We're now at the top level of the flow, having finished our last (nested) parallel fork + output = currentParallelStart; + currentParallelStart = null; + parallelDepth--; + } + return output; + } + + @Override + protected FlowNode next(@Nonnull Collection blackList) { + FlowNode output = null; + + // First we look at the parents of the current node if present + if (_current != null) { + List parents = _current.getParents(); + if (parents == null || parents.size() 
== 0) { + // welp done with this node, guess we consult the queue? + } else if (parents.size() == 1) { + FlowNode p = parents.get(0); + if (p == currentParallelStart) { + // Terminating a parallel scan + FlowNode temp = hitParallelStart(currentParallelStart, p); + if (temp != null) { // Startnode for current parallel block now that it is done + return temp; + } + } else if (!blackList.contains(p)) { + return p; + } + } else if (_current instanceof BlockEndNode && parents.size() > 1) { + // We must be a BlockEndNode that begins this + BlockEndNode end = ((BlockEndNode) _current); + hitParallelEnd(end, parents, blackList); + // Return a node? + } else { + throw new IllegalStateException("Found a FlowNode with multiple parents that isn't the end of a block! "+_current.toString()); + } + } + // Welp, now we consult the queue since we've not hit a likely candidate among parents + + return output; + } + } } diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java index 3983664c..3eb7e257 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java @@ -98,7 +98,7 @@ public void testSimpleScan() throws Exception { FlowExecution exec = b.getExecution(); FlowScanner.ScanAlgorithm[] scans = {new FlowScanner.LinearScanner(), new FlowScanner.DepthFirstScanner(), - new FlowScanner.BlockHoppingScanner() + new FlowScanner.LinearBlockHoppingScanner() }; Predicate echoPredicate = predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); @@ -183,8 +183,8 @@ public void testBlockScan() throws Exception { Predicate matchEchoStep = predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); // Test blockhopping - FlowScanner.BlockHoppingScanner blockHoppingScanner = new FlowScanner.BlockHoppingScanner(); - Collection matches = 
blockHoppingScanner.filter(b.getExecution().getCurrentHeads(), null, matchEchoStep); + FlowScanner.LinearBlockHoppingScanner linearBlockHoppingScanner = new FlowScanner.LinearBlockHoppingScanner(); + Collection matches = linearBlockHoppingScanner.filter(b.getExecution().getCurrentHeads(), null, matchEchoStep); // This means we jumped the blocks Assert.assertEquals(1, matches.size()); @@ -225,7 +225,7 @@ public void testParallelScan() throws Exception { matches = scanner.filter(heads, null, matchEchoStep); Assert.assertTrue(matches.size() == 5); - scanner = new FlowScanner.BlockHoppingScanner(); + scanner = new FlowScanner.LinearBlockHoppingScanner(); matches = scanner.filter(heads, null, matchEchoStep); Assert.assertTrue(matches.size() == 2); } From ae9581b418ace727471faf42949b3ebd42659a27 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 27 Apr 2016 10:37:51 -0400 Subject: [PATCH 017/104] Syntactic sugar and comments on FlowScanner --- .../plugins/workflow/graph/FlowScanner.java | 32 +++++++++++++++---- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java index 0506ef79..8b0cf8c0 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java @@ -31,6 +31,7 @@ import org.jenkinsci.plugins.workflow.actions.LogAction; import org.jenkinsci.plugins.workflow.actions.StageAction; import org.jenkinsci.plugins.workflow.actions.WorkspaceAction; +import org.jenkinsci.plugins.workflow.flow.FlowExecution; import javax.annotation.CheckForNull; import javax.annotation.Nonnull; @@ -58,7 +59,7 @@ public class FlowScanner { * @return Predicate that will match when FlowNode has the action given */ @Nonnull - public static Predicate createPredicateWhereActionExists(@Nonnull final Class actionClass) { + public static Predicate 
nodeHasActionPredicate(@Nonnull final Class actionClass) { return new Predicate() { @Override public boolean apply(FlowNode input) { @@ -68,11 +69,11 @@ public boolean apply(FlowNode input) { } // Default predicates - static final Predicate MATCH_HAS_LABEL = createPredicateWhereActionExists(LabelAction.class); - static final Predicate MATCH_IS_STAGE = createPredicateWhereActionExists(StageAction.class); - static final Predicate MATCH_HAS_WORKSPACE = createPredicateWhereActionExists(WorkspaceAction.class); - static final Predicate MATCH_HAS_ERROR = createPredicateWhereActionExists(ErrorAction.class); - static final Predicate MATCH_HAS_LOG = createPredicateWhereActionExists(LogAction.class); + public static final Predicate MATCH_HAS_LABEL = nodeHasActionPredicate(LabelAction.class); + public static final Predicate MATCH_IS_STAGE = nodeHasActionPredicate(StageAction.class); + public static final Predicate MATCH_HAS_WORKSPACE = nodeHasActionPredicate(WorkspaceAction.class); + public static final Predicate MATCH_HAS_ERROR = nodeHasActionPredicate(ErrorAction.class); + public static final Predicate MATCH_HAS_LOG = nodeHasActionPredicate(LogAction.class); public interface FlowNodeVisitor { /** @@ -174,16 +175,33 @@ protected Collection convertToFastCheckable(@CheckForNull Collection 5 ? 
new HashSet(nodeCollection) : nodeCollection; } + // Polymorphic methods for syntactic sugar + @CheckForNull public FlowNode findFirstMatch(@CheckForNull Collection heads, @Nonnull Predicate matchPredicate) { return this.findFirstMatch(heads, null, matchPredicate); } + @CheckForNull + public FlowNode findFirstMatch(@CheckForNull FlowNode head, @Nonnull Predicate matchPredicate) { + return this.findFirstMatch(Collections.singleton(head), null, matchPredicate); + } + + @CheckForNull + public FlowNode findFirstMatch(@CheckForNull FlowExecution exec, @Nonnull Predicate matchPredicate) { + if (exec != null && exec.getCurrentHeads() != null) { + return this.findFirstMatch(exec.getCurrentHeads(), null, matchPredicate); + } + return null; + } + @Nonnull public Collection findAllMatches(@CheckForNull Collection heads, @Nonnull Predicate matchPredicate) { return this.filter(heads, null, matchPredicate); } + + // Basic algo impl public FlowNode findFirstMatch(@CheckForNull Collection heads, @CheckForNull Collection endNodes, @@ -383,7 +401,7 @@ protected FlowNode next(@Nonnull Collection blackList) { * Think of it as the opposite reverse of {@link org.jenkinsci.plugins.workflow.graph.FlowScanner.DepthFirstScanner}: * - We visit every node exactly once, but walk through all parallel forks before resuming the main flow * - * This is optimal in many cases, since it need only keep minimal state information + * This is near-optimal in many cases, since it keeps minimal state information and explores parallel blocks first * It is also very easy to make it branch/block-aware, since we have all the fork information at all times. 
*/ public static class ForkScanner extends AbstractFlowScanner { From 1331ddd6c67ceb3b88aeb9ea116934376b8956d7 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Sat, 30 Apr 2016 14:14:21 -0400 Subject: [PATCH 018/104] FlowScanner: switch to simpler iterator use internally, add filterator, rewrite internals for iterator, add block test --- .../plugins/workflow/graph/FlowScanner.java | 357 +++++++++++++----- .../workflow/graph/TestFlowScanner.java | 125 ++++-- 2 files changed, 365 insertions(+), 117 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java index 8b0cf8c0..d4127bf5 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java @@ -39,8 +39,11 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.List; +import java.util.NoSuchElementException; import java.util.Set; /** @@ -106,26 +109,168 @@ public interface ScanAlgorithm { * @return All nodes matching condition */ @Nonnull - public Collection filter(@CheckForNull Collection heads, @CheckForNull Collection stopNodes, @Nonnull Predicate matchPredicate); + public Collection filteredNodes(@CheckForNull Collection heads, @CheckForNull Collection stopNodes, @Nonnull Predicate matchPredicate); /** Used for extracting metrics from the flow graph */ public void visitAll(@CheckForNull Collection heads, FlowNodeVisitor visitor); } + /** Iterator that exposes filtering */ + public interface Filterator extends Iterator { + /** Returns a filtered view of an iterable */ + @Nonnull + public Filterator filter(@Nonnull Predicate matchCondition); + } + + /** Filters an iterator against a match predicate */ + public static class FilteratorImpl implements Filterator { + boolean hasNext = false; + 
T nextVal; + Iterator wrapped; + Predicate matchCondition; + + public FilteratorImpl filter(Predicate matchCondition) { + return new FilteratorImpl(this, matchCondition); + } + + public FilteratorImpl(@Nonnull Iterator it, @Nonnull Predicate matchCondition) { + this.wrapped = it; + this.matchCondition = matchCondition; + + while(it.hasNext()) { + T val = it.next(); + if (matchCondition.apply(val)) { + this.nextVal = val; + hasNext = true; + break; + } + } + } + + @Override + public boolean hasNext() { + return hasNext; + } + + @Override + public T next() { + T returnVal = nextVal; + T nextMatch = null; + + boolean foundMatch = false; + while(wrapped.hasNext()) { + nextMatch = wrapped.next(); + if (matchCondition.apply(nextMatch)) { + foundMatch = true; + break; + } + } + if (foundMatch) { + this.nextVal = nextMatch; + this.hasNext = true; + } else { + this.nextVal = null; + this.hasNext = false; + } + return returnVal; + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + } + /** * Base class for flow scanners, which offers basic methods and stubs for algorithms * Scanners store state internally, and are not thread-safe but are reusable * Scans/analysis of graphs is implemented via internal iteration to allow reusing algorithm bodies * However internal iteration has access to additional information */ - public static abstract class AbstractFlowScanner implements ScanAlgorithm { + public static abstract class AbstractFlowScanner implements ScanAlgorithm, Iterable , Filterator { // State variables, not all need be used protected ArrayDeque _queue; + protected FlowNode _current; + protected FlowNode _next; + + protected Collection _blackList = Collections.EMPTY_SET; + + @Override + public boolean hasNext() { + return _next != null; + } + + @Override + public FlowNode next() { + if (_next == null) { + throw new NoSuchElementException(); + } + + // For computing timings and changes, it may be helpful to keep the previous result + 
// by creating a variable _last and storing _current to it. + +// System.out.println("Current iterator val: " + ((_current == null) ? "null" : _current.getId())); +// System.out.println("Next iterator val: " + ((_next == null) ? "null" : _next.getId())); + _current = _next; + _next = next(_blackList); +// System.out.println("New next val: " + ((_next == null) ? "null" : _next.getId())); + return _current; + } + + @Override + public void remove() { + throw new UnsupportedOperationException("FlowGraphs are immutable, so FlowScanners can't remove nodes"); + } + + @Override + public Iterator iterator() { + return this; + } + + /** + * Set up for iteration/analysis on a graph of nodes, initializing the internal state + * @param heads The head nodes we start walking from (the most recently executed nodes, + * i.e. FlowExecution.getCurrentHeads() + * @param blackList Nodes that we cannot visit or walk past (useful to limit scanning to only nodes after a specific point) + * @return True if we can have nodes to work with, otherwise false + */ + public boolean setup(@CheckForNull Collection heads, @CheckForNull Collection blackList) { + if (heads == null || heads.size() == 0) { + return false; + } + Collection fastEndNodes = convertToFastCheckable(blackList); + HashSet filteredHeads = new HashSet(heads); + filteredHeads.removeAll(fastEndNodes); + + if (filteredHeads.size() == 0) { + return false; + } + + reset(); + _blackList = fastEndNodes; + setHeads(filteredHeads); + return true; + } + + /** + * Set up for iteration/analysis on a graph of nodes, initializing the internal state + * @param head The head FlowNode to start walking back from + * @param blackList Nodes that we cannot visit or walk past (useful to limit scanning to only nodes after a specific point) + * null or empty collection means none + * @return True if we can have nodes to work with, otherwise false + */ + public boolean setup(@CheckForNull FlowNode head, @CheckForNull Collection blackList) { + if (head == 
null) { + return false; + } + return setup(Collections.singleton(head), blackList); + } + /** Public APIs need to invoke this before searches */ - protected abstract void initialize(); + protected abstract void reset(); /** Add current head nodes to current processing set */ protected abstract void setHeads(@Nonnull Collection heads); @@ -138,30 +283,6 @@ public static abstract class AbstractFlowScanner implements ScanAlgorithm { @CheckForNull protected abstract FlowNode next(@Nonnull Collection blackList); - - /** Fast internal scan from start through single-parent (unbranched) nodes until we hit a node with one of the following: - * - Multiple parents - * - No parents - * - Satisfies the endCondition predicate - * - * @param endCondition Predicate that ends search - * @return Node satisfying condition - */ - @CheckForNull - protected static FlowNode linearScanUntil(@Nonnull FlowNode start, @Nonnull Predicate endCondition) { - while(true) { - if (endCondition.apply(start)){ - break; - } - List parents = start.getParents(); - if (parents == null || parents.size() == 0 || parents.size() > 1) { - break; - } - start = parents.get(0); - } - return start; - } - /** Convert stop nodes to a collection that can efficiently be checked for membership, handling nulls if needed */ @Nonnull protected Collection convertToFastCheckable(@CheckForNull Collection nodeCollection) { @@ -196,8 +317,8 @@ public FlowNode findFirstMatch(@CheckForNull FlowExecution exec, @Nonnull Predic } @Nonnull - public Collection findAllMatches(@CheckForNull Collection heads, @Nonnull Predicate matchPredicate) { - return this.filter(heads, null, matchPredicate); + public Collection filteredNodes(@CheckForNull Collection heads, @Nonnull Predicate matchPredicate) { + return this.filteredNodes(heads, null, matchPredicate); } @@ -206,64 +327,51 @@ public Collection findAllMatches(@CheckForNull Collection he public FlowNode findFirstMatch(@CheckForNull Collection heads, @CheckForNull Collection endNodes, 
Predicate matchCondition) { - if (heads == null || heads.size() == 0) { - return null; - } - - initialize(); - Collection fastEndNodes = convertToFastCheckable(endNodes); - Collection filteredHeads = new HashSet(heads); - filteredHeads.removeAll(fastEndNodes); - if (filteredHeads.size() == 0) { + if (!setup(heads, endNodes)) { return null; } - this.setHeads(filteredHeads); - while ((_current = next(fastEndNodes)) != null) { - if (matchCondition.apply(_current)) { - return _current; + for (FlowNode f : this) { + if (matchCondition.apply(f)) { + return f; } } return null; } // Basic algo impl - public List filter(@CheckForNull Collection heads, - @CheckForNull Collection endNodes, - Predicate matchCondition) { - if (heads == null || heads.size() == 0) { - return Collections.EMPTY_LIST; - } - initialize(); - Collection fastEndNodes = convertToFastCheckable(endNodes); - Collection filteredHeads = new HashSet(heads); - if (filteredHeads.size() == 0) { + @Nonnull + public List filteredNodes(@CheckForNull Collection heads, + @CheckForNull Collection endNodes, + Predicate matchCondition) { + if (!setup(heads, endNodes)) { return Collections.EMPTY_LIST; } - filteredHeads.removeAll(fastEndNodes); - this.setHeads(filteredHeads); - ArrayList nodes = new ArrayList(); - while ((_current = next(fastEndNodes)) != null) { - if (matchCondition.apply(_current)) { - nodes.add(_current); + ArrayList nodes = new ArrayList(); + for (FlowNode f : this) { + if (matchCondition.apply(f)) { + nodes.add(f); } } return nodes; } + public Filterator filter(Predicate filterCondition) { + return new FilteratorImpl(this, filterCondition); + } + /** Used for extracting metrics from the flow graph */ + @Nonnull public void visitAll(@CheckForNull Collection heads, FlowNodeVisitor visitor) { - if (heads == null || heads.size() == 0) { + if (!setup(heads, Collections.EMPTY_SET)) { return; } - initialize(); - this.setHeads(heads); - Collection endNodes = Collections.EMPTY_SET; - - boolean continueAnalysis 
= true; - while (continueAnalysis && (_current = next(endNodes)) != null) { - continueAnalysis = visitor.visit(_current); + for (FlowNode f : this) { + boolean canContinue = visitor.visit(f); + if (!canContinue) { + break; + } } } } @@ -275,7 +383,7 @@ public static class DepthFirstScanner extends AbstractFlowScanner { protected HashSet _visited = new HashSet(); - protected void initialize() { + protected void reset() { if (this._queue == null) { this._queue = new ArrayDeque(); } else { @@ -287,7 +395,15 @@ protected void initialize() { @Override protected void setHeads(@Nonnull Collection heads) { - _queue.addAll(heads); + Iterator it = heads.iterator(); + if (it.hasNext()) { + FlowNode f = it.next(); + _current = f; + _next = f; + } + while (it.hasNext()) { + _queue.add(it.next()); + } } @Override @@ -322,17 +438,19 @@ protected FlowNode next(@Nonnull Collection blackList) { * Use case: we don't care about parallel branches */ public static class LinearScanner extends AbstractFlowScanner { - protected boolean isFirst = true; @Override - protected void initialize() { - isFirst = true; + protected void reset() { + this._current = null; + this._next = null; + this._blackList = Collections.EMPTY_SET; } @Override protected void setHeads(@Nonnull Collection heads) { if (heads.size() > 0) { this._current = heads.iterator().next(); + this._next = this._current; } } @@ -341,10 +459,6 @@ protected FlowNode next(@Nonnull Collection blackList) { if (_current == null) { return null; } - if (isFirst) { // Kind of cheating, but works - isFirst = false; - return _current; - } List parents = _current.getParents(); if (parents != null && parents.size() > 0) { for (FlowNode f : parents) { @@ -361,12 +475,56 @@ protected FlowNode next(@Nonnull Collection blackList) { * LinearScanner that jumps over nested blocks * Use case: finding information about enclosing blocks or preceding nodes * - Ex: finding out the executor workspace used to run a flownode + * Caveats: + * - If you start 
on the last node of a completed flow, it will jump straight to start (by design) + * - Will only consider the first branch in a parallel case */ public static class LinearBlockHoppingScanner extends LinearScanner { - protected FlowNode jumpBlock(FlowNode current) { - return (current instanceof BlockEndNode) ? - ((BlockEndNode)current).getStartNode() : current; + @Override + public boolean setup(@CheckForNull Collection heads, @CheckForNull Collection blackList) { + boolean possiblyStartable = super.setup(heads, blackList); + return possiblyStartable && _current != null; // In case we start at an end block + } + + @Override + protected void setHeads(@Nonnull Collection heads) { + if (heads.size() > 0) { + this._current = jumpBlockScan(heads.iterator().next(), _blackList); + this._next = this._current; + } + } + + /** Keeps jumping over blocks until we hit the first node preceding a block */ + @CheckForNull + protected FlowNode jumpBlockScan(@CheckForNull FlowNode node, @Nonnull Collection blacklistNodes) { + boolean isDone = false; + FlowNode candidate = node; + + // Find the first candidate node preceding a block... 
and filtering by blacklist + while (candidate != null && node instanceof BlockEndNode) { + candidate = ((BlockEndNode) candidate).getStartNode(); + if (blacklistNodes.contains(candidate)) { + return null; + } + List parents = candidate.getParents(); + if (parents == null || parents.size() == 0) { + return null; + } + // NULLABLE OPTION + boolean foundNode = false; + for (FlowNode f : parents) { + if (!blacklistNodes.contains(f)) { + candidate = f; // Loop again b/c could be BlockEndNode + foundNode = true; + } + } + if (!foundNode) { + return null; + } + } + + return candidate; } @Override @@ -374,21 +532,11 @@ protected FlowNode next(@Nonnull Collection blackList) { if (_current == null) { return null; } - if (isFirst) { // Hax, but solves the problem - isFirst = false; - return _current; - } List parents = _current.getParents(); if (parents != null && parents.size() > 0) { for (FlowNode f : parents) { if (!blackList.contains(f)) { - FlowNode jumped = jumpBlock(f); - if (jumped != f) { - _current = jumped; - return next(blackList); - } else { - return f; - } + return jumpBlockScan(f, blackList); } } } @@ -398,7 +546,7 @@ protected FlowNode next(@Nonnull Collection blackList) { /** * Scanner that will scan down forks when we hit parallel blocks. 
- * Think of it as the opposite reverse of {@link org.jenkinsci.plugins.workflow.graph.FlowScanner.DepthFirstScanner}: + * Think of it as the opposite of {@link org.jenkinsci.plugins.workflow.graph.FlowScanner.DepthFirstScanner}: * - We visit every node exactly once, but walk through all parallel forks before resuming the main flow * * This is near-optimal in many cases, since it keeps minimal state information and explores parallel blocks first @@ -418,20 +566,39 @@ public static class ForkScanner extends AbstractFlowScanner { protected int parallelDepth = 0; @Override - protected void initialize() { + protected void reset() { if (_queue == null) { _queue = new ArrayDeque(); } else { _queue.clear(); } + forkStarts.clear(); + parallelDepth =0; + currentParallelStart = null; + _current = null; + _next = null; } @Override protected void setHeads(@Nonnull Collection heads) { // FIXME handle case where we have multiple heads - we need to do something special to handle the parallel branches // Until they rejoin the head! - _current = null; + _current = null; // Somehow set head like linearhoppoingflowscanner _queue.addAll(heads); + _current = _queue.poll(); + _next = _current; + + // If we fork this to a separate plugin, we can try doing this via + // StepExecution.applyAll(ParallelStepExecution.class, Function) + // using execution.getContext().get(FlowNode.class) to fetch the FlowNodes for parallel execution (the BlockStartNodes) + // But we will need to filter the nodes by which pipeline run is occurring + + LinearBlockHoppingScanner scanner = new LinearBlockHoppingScanner(); + + HashMap parallelStarts = new HashMap(); + // Resolve all the heads to the roots + + // I guess we can only walk the graph until the heads share a common ancestor? 
} /** @@ -491,7 +658,7 @@ protected FlowNode next(@Nonnull Collection blackList) { if (_current != null) { List parents = _current.getParents(); if (parents == null || parents.size() == 0) { - // welp done with this node, guess we consult the queue? + // welp do ne with this node, guess we consult the queue? } else if (parents.size() == 1) { FlowNode p = parents.get(0); if (p == currentParallelStart) { @@ -512,6 +679,10 @@ protected FlowNode next(@Nonnull Collection blackList) { throw new IllegalStateException("Found a FlowNode with multiple parents that isn't the end of a block! "+_current.toString()); } } + if (_queue.size() > 0) { + output = _queue.pop(); + currentParallelStart = forkStarts.pop(); + } // Welp, now we consult the queue since we've not hit a likely candidate among parents return output; diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java index 3eb7e257..67e7e220 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java @@ -67,6 +67,8 @@ public boolean apply(FlowNode input) { return outputPredicate; } + Predicate MATCH_ECHO_STEP = predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); + static final class CollectingVisitor implements FlowScanner.FlowNodeVisitor { ArrayList visited = new ArrayList(); @@ -96,30 +98,46 @@ public void testSimpleScan() throws Exception { )); WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); FlowExecution exec = b.getExecution(); - FlowScanner.ScanAlgorithm[] scans = {new FlowScanner.LinearScanner(), + FlowScanner.AbstractFlowScanner[] scans = {new FlowScanner.LinearScanner(), new FlowScanner.DepthFirstScanner(), new FlowScanner.LinearBlockHoppingScanner() +// new FlowScanner.ForkScanner() }; - Predicate echoPredicate = 
predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); List heads = exec.getCurrentHeads(); + // Iteration tests + for (FlowScanner.AbstractFlowScanner scan : scans) { + System.out.println("Iteration test with scanner: "+scan.getClass()); + scan.setup(heads, null); + + for (int i=6; i>2; i--) { + Assert.assertTrue(scan.hasNext()); + FlowNode f = scan.next(); + Assert.assertEquals(Integer.toString(i), f.getId()); + } + + FlowNode f2 = scan.next(); + Assert.assertFalse(scan.hasNext()); + Assert.assertEquals("2", f2.getId()); + } + // Test expected scans with no stop nodes given (different ways of specifying none) for (FlowScanner.ScanAlgorithm sa : scans) { System.out.println("Testing class: "+sa.getClass()); - FlowNode node = sa.findFirstMatch(heads, null, echoPredicate); + FlowNode node = sa.findFirstMatch(heads, null, MATCH_ECHO_STEP); Assert.assertEquals(exec.getNode("5"), node); - node = sa.findFirstMatch(heads, Collections.EMPTY_LIST, echoPredicate); + node = sa.findFirstMatch(heads, Collections.EMPTY_LIST, MATCH_ECHO_STEP); Assert.assertEquals(exec.getNode("5"), node); - node = sa.findFirstMatch(heads, Collections.EMPTY_SET, echoPredicate); + node = sa.findFirstMatch(heads, Collections.EMPTY_SET, MATCH_ECHO_STEP); Assert.assertEquals(exec.getNode("5"), node); - Collection nodeList = sa.filter(heads, null, echoPredicate); + Collection nodeList = sa.filteredNodes(heads, null, MATCH_ECHO_STEP); FlowNode[] expected = new FlowNode[]{exec.getNode("5"), exec.getNode("4")}; Assert.assertArrayEquals(expected, nodeList.toArray()); - nodeList = sa.filter(heads, Collections.EMPTY_LIST, echoPredicate); + nodeList = sa.filteredNodes(heads, Collections.EMPTY_LIST, MATCH_ECHO_STEP); Assert.assertArrayEquals(expected, nodeList.toArray()); - nodeList = sa.filter(heads, Collections.EMPTY_SET, echoPredicate); + nodeList = sa.filteredNodes(heads, Collections.EMPTY_SET, MATCH_ECHO_STEP); Assert.assertArrayEquals(expected, nodeList.toArray()); } @@ -129,7 
+147,7 @@ public void testSimpleScan() throws Exception { FlowNode node = sa.findFirstMatch(heads, null, (Predicate)Predicates.alwaysFalse()); Assert.assertNull(node); - Collection nodeList = sa.filter(heads, null, (Predicate) Predicates.alwaysFalse()); + Collection nodeList = sa.filteredNodes(heads, null, (Predicate) Predicates.alwaysFalse()); Assert.assertNotNull(nodeList); Assert.assertEquals(0, nodeList.size()); } @@ -139,7 +157,7 @@ public void testSimpleScan() throws Exception { // Verify we touch head and foot nodes too for (FlowScanner.ScanAlgorithm sa : scans) { System.out.println("Testing class: " + sa.getClass()); - Collection nodeList = sa.filter(heads, null, (Predicate) Predicates.alwaysTrue()); + Collection nodeList = sa.filteredNodes(heads, null, (Predicate) Predicates.alwaysTrue()); vis.reset(); sa.visitAll(heads, vis); Assert.assertEquals(5, nodeList.size()); @@ -150,17 +168,17 @@ public void testSimpleScan() throws Exception { Collection noMatchEndNode = Collections.singleton(exec.getNode("5")); Collection singleMatchEndNode = Collections.singleton(exec.getNode("4")); for (FlowScanner.ScanAlgorithm sa : scans) { - FlowNode node = sa.findFirstMatch(heads, noMatchEndNode, echoPredicate); + FlowNode node = sa.findFirstMatch(heads, noMatchEndNode, MATCH_ECHO_STEP); Assert.assertNull(node); - Collection nodeList = sa.filter(heads, noMatchEndNode, echoPredicate); + Collection nodeList = sa.filteredNodes(heads, noMatchEndNode, MATCH_ECHO_STEP); Assert.assertNotNull(nodeList); Assert.assertEquals(0, nodeList.size()); // Now we try with a stop list the reduces node set for multiple matches - node = sa.findFirstMatch(heads, singleMatchEndNode, echoPredicate); + node = sa.findFirstMatch(heads, singleMatchEndNode, MATCH_ECHO_STEP); Assert.assertEquals(exec.getNode("5"), node); - nodeList = sa.filter(heads, singleMatchEndNode, echoPredicate); + nodeList = sa.filteredNodes(heads, singleMatchEndNode, MATCH_ECHO_STEP); Assert.assertNotNull(nodeList); 
Assert.assertEquals(1, nodeList.size()); Assert.assertEquals(exec.getNode("5"), nodeList.iterator().next()); @@ -184,18 +202,57 @@ public void testBlockScan() throws Exception { // Test blockhopping FlowScanner.LinearBlockHoppingScanner linearBlockHoppingScanner = new FlowScanner.LinearBlockHoppingScanner(); - Collection matches = linearBlockHoppingScanner.filter(b.getExecution().getCurrentHeads(), null, matchEchoStep); + Collection matches = linearBlockHoppingScanner.filteredNodes(b.getExecution().getCurrentHeads(), null, matchEchoStep); // This means we jumped the blocks Assert.assertEquals(1, matches.size()); FlowScanner.DepthFirstScanner depthFirstScanner = new FlowScanner.DepthFirstScanner(); - matches = depthFirstScanner.filter(b.getExecution().getCurrentHeads(), null, matchEchoStep); + matches = depthFirstScanner.filteredNodes(b.getExecution().getCurrentHeads(), null, matchEchoStep); // Nodes all covered Assert.assertEquals(3, matches.size()); } + @Test + public void blockJumpTest() throws Exception { + WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "Convoluted"); + job.setDefinition(new CpsFlowDefinition( + "echo 'sample'\n" + + "node {\n" + + " echo 'inside node' \n" + + "}" + )); + + /** Flow structure (ID - type) + 2 - FlowStartNode (BlockStartNode) + 3 - Echostep + 4 - ExecutorStep (StepStartNode) - WorkspaceAction + 5 - ExecutorStep (StepStartNode) - BodyInvocationAction + 6 - Echostep + 7 - StepEndNode - startId (5) + 8 - StepEndNode - startId (4) + 9 - FlowEndNode + */ + + WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); + Collection heads = b.getExecution().getCurrentHeads(); + FlowExecution exec = b.getExecution(); + + FlowScanner.LinearBlockHoppingScanner hopper = new FlowScanner.LinearBlockHoppingScanner(); + FlowNode headCandidate = exec.getNode("7"); + hopper.setup(headCandidate, null); + List filtered = hopper.filteredNodes(Collections.singleton(headCandidate), null, MATCH_ECHO_STEP); + Assert.assertEquals(2, 
filtered.size()); + + filtered = hopper.filteredNodes(Collections.singleton(exec.getNode("8")), null, MATCH_ECHO_STEP); + Assert.assertEquals(1, filtered.size()); + + filtered = hopper.filteredNodes(Collections.singleton(exec.getNode("9")), null, MATCH_ECHO_STEP); + Assert.assertEquals(1, filtered.size()); + } + + /** And the parallel case */ @Test public void testParallelScan() throws Exception { @@ -213,21 +270,41 @@ public void testParallelScan() throws Exception { "parallel steps\n" + "echo 'final'" )); + + /** Flow structure (ID - type) + 2 - FlowStartNode (BlockStartNode) + 3 - Echostep + 4 - ParallelStep (StepStartNode) (start branches) + 6 - ParallelStep (StepStartNode) (start branch 1), ParallelLabelAction with branchname=1 + 7 - ParallelStep (StepStartNode) (start branch 2), ParallelLabelAction with branchname=2 + 8 - EchoStep, (branch 1) parent=6 + 9 - StepEndNode, (end branch 1) startId=6, parentId=8 + 10 - EchoStep, (branch 2) parentId=7 + 11 - EchoStep, (branch 2) parentId = 10 + 12 - StepEndNode (end branch 2) startId=7 parentId=11, + 13 - StepEndNode (close branches), parentIds = 9,12, startId=4 + 14 - EchoStep + 15 - FlowEndNode (BlockEndNode) + */ + WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); Collection heads = b.getExecution().getCurrentHeads(); - Predicate matchEchoStep = predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); - FlowScanner.ScanAlgorithm scanner = new FlowScanner.LinearScanner(); - Collection matches = scanner.filter(heads, null, matchEchoStep); - Assert.assertTrue(matches.size() >= 3 && matches.size() <= 4); + FlowScanner.AbstractFlowScanner scanner = new FlowScanner.LinearScanner(); + Collection matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); + Assert.assertTrue(matches.size() == 3 || matches.size() == 4); // Depending on ordering + scanner = new FlowScanner.DepthFirstScanner(); - matches = scanner.filter(heads, null, matchEchoStep); - 
Assert.assertTrue(matches.size() == 5); + matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); + Assert.assertEquals(5, matches.size()); scanner = new FlowScanner.LinearBlockHoppingScanner(); - matches = scanner.filter(heads, null, matchEchoStep); - Assert.assertTrue(matches.size() == 2); + matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); + Assert.assertEquals(0, matches.size()); + + matches = scanner.filteredNodes(Collections.singleton(b.getExecution().getNode("14")), null); + Assert.assertEquals(2, matches.size()); } } \ No newline at end of file From a307266a905bfcd31ba42a7596f69654609ddc67 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Mon, 2 May 2016 12:49:39 -0400 Subject: [PATCH 019/104] More AbstractFlowScanner helpers, add iterator on block starts, add tests, fix blockhoppingscanner --- .../plugins/workflow/graph/FlowScanner.java | 52 +++++++------ .../workflow/graph/TestFlowScanner.java | 73 +++++++++++++++---- 2 files changed, 87 insertions(+), 38 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java index d4127bf5..29c7637a 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java @@ -25,6 +25,7 @@ */ import com.google.common.base.Predicate; +import com.google.common.base.Predicates; import hudson.model.Action; import org.jenkinsci.plugins.workflow.actions.ErrorAction; import org.jenkinsci.plugins.workflow.actions.LabelAction; @@ -77,6 +78,7 @@ public boolean apply(FlowNode input) { public static final Predicate MATCH_HAS_WORKSPACE = nodeHasActionPredicate(WorkspaceAction.class); public static final Predicate MATCH_HAS_ERROR = nodeHasActionPredicate(ErrorAction.class); public static final Predicate MATCH_HAS_LOG = nodeHasActionPredicate(LogAction.class); + public static final Predicate MATCH_BLOCK_START = 
(Predicate)Predicates.instanceOf(BlockStartNode.class); public interface FlowNodeVisitor { /** @@ -115,6 +117,12 @@ public interface ScanAlgorithm { public void visitAll(@CheckForNull Collection heads, FlowNodeVisitor visitor); } + public static Filterator filterableEnclosingBlocks(FlowNode f) { + LinearBlockHoppingScanner scanner = new LinearBlockHoppingScanner(); + scanner.setup(f); + return scanner.filter(MATCH_BLOCK_START); + } + /** Iterator that exposes filtering */ public interface Filterator extends Iterator { /** Returns a filtered view of an iterable */ @@ -269,11 +277,18 @@ public boolean setup(@CheckForNull FlowNode head, @CheckForNull Collection heads); + /** Add current head nodes to current processing set, after filtering by blackList */ + protected abstract void setHeads(@Nonnull Collection filteredHeads); /** * Actual meat of the iteration, get the next node to visit, using & updating state as needed @@ -317,11 +332,14 @@ public FlowNode findFirstMatch(@CheckForNull FlowExecution exec, @Nonnull Predic } @Nonnull - public Collection filteredNodes(@CheckForNull Collection heads, @Nonnull Predicate matchPredicate) { + public List filteredNodes(@CheckForNull Collection heads, @Nonnull Predicate matchPredicate) { return this.filteredNodes(heads, null, matchPredicate); } - + @Nonnull + public List filteredNodes(@CheckForNull FlowNode head, @Nonnull Predicate matchPredicate) { + return this.filteredNodes(Collections.singleton(head), null, matchPredicate); + } // Basic algo impl public FlowNode findFirstMatch(@CheckForNull Collection heads, @@ -498,11 +516,10 @@ protected void setHeads(@Nonnull Collection heads) { /** Keeps jumping over blocks until we hit the first node preceding a block */ @CheckForNull protected FlowNode jumpBlockScan(@CheckForNull FlowNode node, @Nonnull Collection blacklistNodes) { - boolean isDone = false; FlowNode candidate = node; // Find the first candidate node preceding a block... 
and filtering by blacklist - while (candidate != null && node instanceof BlockEndNode) { + while (candidate != null && candidate instanceof BlockEndNode) { candidate = ((BlockEndNode) candidate).getStartNode(); if (blacklistNodes.contains(candidate)) { return null; @@ -511,12 +528,12 @@ protected FlowNode jumpBlockScan(@CheckForNull FlowNode node, @Nonnull Collectio if (parents == null || parents.size() == 0) { return null; } - // NULLABLE OPTION boolean foundNode = false; for (FlowNode f : parents) { if (!blacklistNodes.contains(f)) { candidate = f; // Loop again b/c could be BlockEndNode foundNode = true; + break; } } if (!foundNode) { @@ -536,7 +553,7 @@ protected FlowNode next(@Nonnull Collection blackList) { if (parents != null && parents.size() > 0) { for (FlowNode f : parents) { if (!blackList.contains(f)) { - return jumpBlockScan(f, blackList); + return (f instanceof BlockEndNode) ? jumpBlockScan(f, blackList) : f; } } } @@ -581,24 +598,15 @@ protected void reset() { @Override protected void setHeads(@Nonnull Collection heads) { - // FIXME handle case where we have multiple heads - we need to do something special to handle the parallel branches - // Until they rejoin the head! 
+ if (heads.size() > 1) { + throw new IllegalArgumentException("ForkedFlowScanner can't handle multiple head nodes yet"); + // TODO We need to implement this using filterableEnclosingBlocks + // and add nodes to with the start of their parallel branches + } _current = null; // Somehow set head like linearhoppoingflowscanner _queue.addAll(heads); _current = _queue.poll(); _next = _current; - - // If we fork this to a separate plugin, we can try doing this via - // StepExecution.applyAll(ParallelStepExecution.class, Function) - // using execution.getContext().get(FlowNode.class) to fetch the FlowNodes for parallel execution (the BlockStartNodes) - // But we will need to filter the nodes by which pipeline run is occurring - - LinearBlockHoppingScanner scanner = new LinearBlockHoppingScanner(); - - HashMap parallelStarts = new HashMap(); - // Resolve all the heads to the roots - - // I guess we can only walk the graph until the heads share a common ancestor? } /** diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java index 67e7e220..ab38e3e6 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java @@ -96,11 +96,19 @@ public void testSimpleScan() throws Exception { "echo 'donothing'\n" + "echo 'doitagain'" )); + + /** Flow structure (ID - type) + 2 - FlowStartNode + 3 - SleepStep + 4 - EchoStep + 5 - EchoStep + 6 - FlowEndNode + */ + WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); FlowExecution exec = b.getExecution(); FlowScanner.AbstractFlowScanner[] scans = {new FlowScanner.LinearScanner(), - new FlowScanner.DepthFirstScanner(), - new FlowScanner.LinearBlockHoppingScanner() + new FlowScanner.DepthFirstScanner() // new FlowScanner.ForkScanner() }; @@ -122,6 +130,15 @@ public void testSimpleScan() throws Exception { Assert.assertEquals("2", 
f2.getId()); } + // Block Hopping tests + FlowScanner.LinearBlockHoppingScanner scanner = new FlowScanner.LinearBlockHoppingScanner(); + Assert.assertFalse("BlockHopping scanner jumps over the flow when started at end", scanner.setup(heads, Collections.EMPTY_SET)); + List collectedNodes = scanner.filteredNodes(Collections.singleton(exec.getNode("5")), null, (Predicate)Predicates.alwaysTrue()); + Assert.assertEquals(exec.getNode("5"), collectedNodes.get(0)); + Assert.assertEquals(exec.getNode("4"), collectedNodes.get(1)); + Assert.assertEquals(exec.getNode("3"), collectedNodes.get(2)); + Assert.assertEquals(exec.getNode("2"), collectedNodes.get(3)); + // Test expected scans with no stop nodes given (different ways of specifying none) for (FlowScanner.ScanAlgorithm sa : scans) { System.out.println("Testing class: "+sa.getClass()); @@ -187,7 +204,7 @@ public void testSimpleScan() throws Exception { /** Tests the basic scan algorithm where blocks are involved */ @Test - public void testBlockScan() throws Exception { + public void testBasicScanWithBlock() throws Exception { WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "Convoluted"); job.setDefinition(new CpsFlowDefinition( "echo 'first'\n" + @@ -197,26 +214,43 @@ public void testBlockScan() throws Exception { "}\n" + "sleep 1" )); + /** Flow structure (ID - type) + 2 - FlowStartNode + 3 - EchoStep + 4 - TimeoutStep + 5 - TimeoutStep with BodyInvocationAction + 6 - EchoStep + 7 - EchoStep + 8 - StepEndNode (BlockEndNode), startId=5 + 9 - StepEndNode (BlockEndNode), startId = 4 + 10 - SleepStep + 11 - FlowEndNode + */ + WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); Predicate matchEchoStep = predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); + FlowExecution exec = b.getExecution(); + + // Linear analysis + FlowScanner.LinearScanner linearScanner = new FlowScanner.LinearScanner(); + Assert.assertEquals(3, linearScanner.filteredNodes(exec.getCurrentHeads(), 
null, matchEchoStep).size()); + Assert.assertEquals(3, linearScanner.filteredNodes(exec.getNode("7"), matchEchoStep).size()); // Test blockhopping FlowScanner.LinearBlockHoppingScanner linearBlockHoppingScanner = new FlowScanner.LinearBlockHoppingScanner(); - Collection matches = linearBlockHoppingScanner.filteredNodes(b.getExecution().getCurrentHeads(), null, matchEchoStep); - - // This means we jumped the blocks - Assert.assertEquals(1, matches.size()); + Assert.assertEquals(0, linearBlockHoppingScanner.filteredNodes(exec.getCurrentHeads(), null, matchEchoStep).size()); //Hopped + Assert.assertEquals(1, linearBlockHoppingScanner.filteredNodes(exec.getNode("8"), matchEchoStep).size()); + Assert.assertEquals(3, linearBlockHoppingScanner.filteredNodes(exec.getNode("7"), matchEchoStep).size()); + // Prove we covered all FlowScanner.DepthFirstScanner depthFirstScanner = new FlowScanner.DepthFirstScanner(); - matches = depthFirstScanner.filteredNodes(b.getExecution().getCurrentHeads(), null, matchEchoStep); - - // Nodes all covered - Assert.assertEquals(3, matches.size()); + Assert.assertEquals(3, depthFirstScanner.filteredNodes(exec.getCurrentHeads(), null, matchEchoStep).size()); + Assert.assertEquals(3, depthFirstScanner.filteredNodes(exec.getNode("7"), matchEchoStep).size()); } @Test public void blockJumpTest() throws Exception { - WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "Convoluted"); + WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "BlockUsing"); job.setDefinition(new CpsFlowDefinition( "echo 'sample'\n" + "node {\n" + @@ -241,15 +275,22 @@ public void blockJumpTest() throws Exception { FlowScanner.LinearBlockHoppingScanner hopper = new FlowScanner.LinearBlockHoppingScanner(); FlowNode headCandidate = exec.getNode("7"); - hopper.setup(headCandidate, null); - List filtered = hopper.filteredNodes(Collections.singleton(headCandidate), null, MATCH_ECHO_STEP); + Assert.assertEquals(exec.getNode("4"), 
hopper.jumpBlockScan(headCandidate, Collections.EMPTY_SET)); + Assert.assertTrue("Setup should return true if we can iterate", hopper.setup(headCandidate, null)); + + headCandidate = exec.getNode("6"); + List filtered = hopper.filteredNodes(headCandidate, MATCH_ECHO_STEP); Assert.assertEquals(2, filtered.size()); + headCandidate = exec.getNode("7"); + filtered = hopper.filteredNodes(Collections.singleton(headCandidate), null, MATCH_ECHO_STEP); + Assert.assertEquals(1, filtered.size()); + filtered = hopper.filteredNodes(Collections.singleton(exec.getNode("8")), null, MATCH_ECHO_STEP); Assert.assertEquals(1, filtered.size()); filtered = hopper.filteredNodes(Collections.singleton(exec.getNode("9")), null, MATCH_ECHO_STEP); - Assert.assertEquals(1, filtered.size()); + Assert.assertEquals(0, filtered.size()); } @@ -303,7 +344,7 @@ public void testParallelScan() throws Exception { matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); Assert.assertEquals(0, matches.size()); - matches = scanner.filteredNodes(Collections.singleton(b.getExecution().getNode("14")), null); + matches = scanner.filteredNodes(Collections.singleton(b.getExecution().getNode("14")), MATCH_ECHO_STEP); Assert.assertEquals(2, matches.size()); } From 211f297e8198f316b0e6b2ff7866b2db6b0600d2 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Mon, 2 May 2016 15:33:10 -0400 Subject: [PATCH 020/104] FlowScanning: ForkScanner passes in basic case, next up is testing nested parallel blocks and blackListing --- .../plugins/workflow/graph/FlowScanner.java | 20 ++++++++++++------- .../workflow/graph/TestFlowScanner.java | 19 ++++++++++++++++-- 2 files changed, 30 insertions(+), 9 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java index 29c7637a..bb56c8be 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java +++ 
b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java @@ -603,7 +603,7 @@ protected void setHeads(@Nonnull Collection heads) { // TODO We need to implement this using filterableEnclosingBlocks // and add nodes to with the start of their parallel branches } - _current = null; // Somehow set head like linearhoppoingflowscanner + _current = null; _queue.addAll(heads); _current = _queue.poll(); _next = _current; @@ -613,15 +613,19 @@ protected void setHeads(@Nonnull Collection heads) { * Invoked when we start entering a parallel block (walking from head of the flow, so we see the block end first) * @param endNode * @param heads + * @return FlowNode next node to visit */ - protected void hitParallelEnd(BlockEndNode endNode, List heads, Collection blackList) { + protected FlowNode hitParallelEnd(BlockEndNode endNode, List heads, Collection blackList) { int branchesAdded = 0; BlockStartNode start = endNode.getStartNode(); + FlowNode output = null; for (FlowNode f : heads) { if (!blackList.contains(f)) { if (branchesAdded == 0) { // We use references because it is more efficient currentParallelStart = start; + output = f; } else { + _queue.push(f); forkStarts.push(start); } branchesAdded++; @@ -630,6 +634,7 @@ protected void hitParallelEnd(BlockEndNode endNode, List heads, Collec if (branchesAdded > 0) { parallelDepth++; } + return output; } /** @@ -641,7 +646,7 @@ protected void hitParallelEnd(BlockEndNode endNode, List heads, Collec protected FlowNode hitParallelStart(FlowNode startNode, FlowNode parallelChild) { FlowNode output = null; if (forkStarts.size() > 0) { // More forks (or nested parallel forks) remain - FlowNode end = forkStarts.pop(); + FlowNode end = forkStarts.peek(); if (end != currentParallelStart) { // Nested parallel branches, and we finished this fork parallelDepth--; output = currentParallelStart; @@ -666,7 +671,7 @@ protected FlowNode next(@Nonnull Collection blackList) { if (_current != null) { List parents = _current.getParents(); 
if (parents == null || parents.size() == 0) { - // welp do ne with this node, guess we consult the queue? + // welp done with this node, guess we consult the queue? } else if (parents.size() == 1) { FlowNode p = parents.get(0); if (p == currentParallelStart) { @@ -681,8 +686,10 @@ protected FlowNode next(@Nonnull Collection blackList) { } else if (_current instanceof BlockEndNode && parents.size() > 1) { // We must be a BlockEndNode that begins this BlockEndNode end = ((BlockEndNode) _current); - hitParallelEnd(end, parents, blackList); - // Return a node? + FlowNode possibleOutput = hitParallelEnd(end, parents, blackList); // What if output is block but other branches aren't? + if (possibleOutput != null) { + return possibleOutput; + } } else { throw new IllegalStateException("Found a FlowNode with multiple parents that isn't the end of a block! "+_current.toString()); } @@ -691,7 +698,6 @@ protected FlowNode next(@Nonnull Collection blackList) { output = _queue.pop(); currentParallelStart = forkStarts.pop(); } - // Welp, now we consult the queue since we've not hit a likely candidate among parents return output; } diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java index ab38e3e6..350085cb 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java @@ -108,8 +108,8 @@ public void testSimpleScan() throws Exception { WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); FlowExecution exec = b.getExecution(); FlowScanner.AbstractFlowScanner[] scans = {new FlowScanner.LinearScanner(), - new FlowScanner.DepthFirstScanner() -// new FlowScanner.ForkScanner() + new FlowScanner.DepthFirstScanner(), + new FlowScanner.ForkScanner() }; List heads = exec.getCurrentHeads(); @@ -246,6 +246,11 @@ public void testBasicScanWithBlock() throws Exception { 
FlowScanner.DepthFirstScanner depthFirstScanner = new FlowScanner.DepthFirstScanner(); Assert.assertEquals(3, depthFirstScanner.filteredNodes(exec.getCurrentHeads(), null, matchEchoStep).size()); Assert.assertEquals(3, depthFirstScanner.filteredNodes(exec.getNode("7"), matchEchoStep).size()); + + // Prove we covered all + FlowScanner.ForkScanner forkScanner = new FlowScanner.ForkScanner(); + Assert.assertEquals(3, forkScanner.filteredNodes(exec.getCurrentHeads(), null, matchEchoStep).size()); + Assert.assertEquals(3, forkScanner.filteredNodes(exec.getNode("7"), matchEchoStep).size()); } @Test @@ -329,6 +334,7 @@ public void testParallelScan() throws Exception { */ WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); + FlowExecution exec = b.getExecution(); Collection heads = b.getExecution().getCurrentHeads(); FlowScanner.AbstractFlowScanner scanner = new FlowScanner.LinearScanner(); @@ -340,6 +346,15 @@ public void testParallelScan() throws Exception { matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); Assert.assertEquals(5, matches.size()); + // We're going to test the ForkScanner more fully + scanner = new FlowScanner.ForkScanner(); + matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); + Assert.assertEquals(5, matches.size()); + + // Start in one branch, test the forkscanning + Assert.assertEquals(3, scanner.filteredNodes(exec.getNode("12"), MATCH_ECHO_STEP).size()); + Assert.assertEquals(2, scanner.filteredNodes(exec.getNode("9"), MATCH_ECHO_STEP).size()); + scanner = new FlowScanner.LinearBlockHoppingScanner(); matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); Assert.assertEquals(0, matches.size()); From c7145028be1249b4882c3560e79ce55f8c69b491 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 3 May 2016 09:31:24 -0400 Subject: [PATCH 021/104] Add standard gitignore items --- .gitignore | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/.gitignore b/.gitignore index 
37cf854d..948c7a3e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,28 @@ target work + +# IntelliJ project files +*.iml +*.iws +*.ipr +.idea +out + +# eclipse project file +.settings +.classpath +.project +build + +# vim +*~ +*.swp + +# ctags +tags + +# OS X +.DS_Store + +# mvn versions:set +pom.xml.versionsBackup From 94857aca5e3fc26ce48a285f5dc11bf6b77f7c01 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 3 May 2016 10:15:28 -0400 Subject: [PATCH 022/104] Test ForkScanner blacklisting with parallel branches & fix issues --- .../plugins/workflow/graph/FlowScanner.java | 23 ++++++++--------- .../workflow/graph/TestFlowScanner.java | 25 ++++++++++++------- 2 files changed, 27 insertions(+), 21 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java index bb56c8be..6b9cef44 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java @@ -611,15 +611,15 @@ protected void setHeads(@Nonnull Collection heads) { /** * Invoked when we start entering a parallel block (walking from head of the flow, so we see the block end first) - * @param endNode - * @param heads + * @param endNode Node where parents merge (final end node for the parallel block) + * @param parents Parent nodes that end here * @return FlowNode next node to visit */ - protected FlowNode hitParallelEnd(BlockEndNode endNode, List heads, Collection blackList) { + protected FlowNode hitParallelEnd(BlockEndNode endNode, List parents, Collection blackList) { int branchesAdded = 0; BlockStartNode start = endNode.getStartNode(); FlowNode output = null; - for (FlowNode f : heads) { + for (FlowNode f : parents) { if (!blackList.contains(f)) { if (branchesAdded == 0) { // We use references because it is more efficient currentParallelStart = start; @@ -639,19 +639,17 @@ protected FlowNode 
hitParallelEnd(BlockEndNode endNode, List heads, Co /** * Invoked when we complete parallel block, walking from the head (so encountered after the end) - * @param startNode StartNode for the block, - * @param parallelChild Parallel child node that is ending this * @return FlowNode if we're the last node */ - protected FlowNode hitParallelStart(FlowNode startNode, FlowNode parallelChild) { + protected FlowNode hitParallelStart() { FlowNode output = null; if (forkStarts.size() > 0) { // More forks (or nested parallel forks) remain FlowNode end = forkStarts.peek(); - if (end != currentParallelStart) { // Nested parallel branches, and we finished this fork + // Nested parallel branches, finished nested level so we visit the head and enclosing parallel block + if (end != currentParallelStart) { parallelDepth--; output = currentParallelStart; } - // TODO handle case where we do early exit because we encountered stop node // If the current end == currentParallelStart then we are finishing another branch of current flow currentParallelStart = end; @@ -660,7 +658,8 @@ protected FlowNode hitParallelStart(FlowNode startNode, FlowNode parallelChild) currentParallelStart = null; parallelDepth--; } - return output; + // Handle cases where the BlockStartNode for the parallel block is blackListed + return (output != null && !_blackList.contains(output)) ? 
output : null; } @Override @@ -676,11 +675,11 @@ protected FlowNode next(@Nonnull Collection blackList) { FlowNode p = parents.get(0); if (p == currentParallelStart) { // Terminating a parallel scan - FlowNode temp = hitParallelStart(currentParallelStart, p); + FlowNode temp = hitParallelStart(); if (temp != null) { // Startnode for current parallel block now that it is done return temp; } - } else if (!blackList.contains(p)) { + } else if (!blackList.contains(p)) { return p; } } else if (_current instanceof BlockEndNode && parents.size() > 1) { diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java index 350085cb..0488d627 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java @@ -41,6 +41,7 @@ import javax.annotation.Nonnull; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -341,12 +342,19 @@ public void testParallelScan() throws Exception { Collection matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); Assert.assertTrue(matches.size() == 3 || matches.size() == 4); // Depending on ordering - scanner = new FlowScanner.DepthFirstScanner(); matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); Assert.assertEquals(5, matches.size()); - // We're going to test the ForkScanner more fully + // Block hopping scanner + scanner = new FlowScanner.LinearBlockHoppingScanner(); + matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); + Assert.assertEquals(0, matches.size()); + + matches = scanner.filteredNodes(Collections.singleton(b.getExecution().getNode("14")), MATCH_ECHO_STEP); + Assert.assertEquals(2, matches.size()); + + // We're going to test the ForkScanner in more depth since this is its natural use scanner = new FlowScanner.ForkScanner(); 
matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); Assert.assertEquals(5, matches.size()); @@ -355,12 +363,11 @@ public void testParallelScan() throws Exception { Assert.assertEquals(3, scanner.filteredNodes(exec.getNode("12"), MATCH_ECHO_STEP).size()); Assert.assertEquals(2, scanner.filteredNodes(exec.getNode("9"), MATCH_ECHO_STEP).size()); - scanner = new FlowScanner.LinearBlockHoppingScanner(); - matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); - Assert.assertEquals(0, matches.size()); - - matches = scanner.filteredNodes(Collections.singleton(b.getExecution().getNode("14")), MATCH_ECHO_STEP); - Assert.assertEquals(2, matches.size()); + // Filtering at different points within branches + List blackList = Arrays.asList(exec.getNode("6"), exec.getNode("7")); + Assert.assertEquals(4, scanner.filteredNodes(heads, blackList, MATCH_ECHO_STEP).size()); + Assert.assertEquals(4, scanner.filteredNodes(heads, Collections.singletonList(exec.getNode("4")), MATCH_ECHO_STEP).size()); + blackList = Arrays.asList(exec.getNode("6"), exec.getNode("10")); + Assert.assertEquals(3, scanner.filteredNodes(heads, blackList, MATCH_ECHO_STEP).size()); } - } \ No newline at end of file From 94845a10ff615f8b530fb9a6317a43895e356101 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 3 May 2016 11:58:56 -0400 Subject: [PATCH 023/104] Refactor FlowScanner & affiliated classes to be prepare for review --- .../plugins/workflow/graph/Filterator.java | 13 + .../workflow/graph/FilteratorImpl.java | 65 +++++ .../workflow/graph/FlowNodeVisitor.java | 16 ++ .../plugins/workflow/graph/FlowScanner.java | 262 ++++++------------ .../workflow/graph/TestFlowScanner.java | 17 +- 5 files changed, 183 insertions(+), 190 deletions(-) create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graph/Filterator.java create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graph/FilteratorImpl.java create mode 100644 
src/main/java/org/jenkinsci/plugins/workflow/graph/FlowNodeVisitor.java diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/Filterator.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/Filterator.java new file mode 100644 index 00000000..ed46b628 --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graph/Filterator.java @@ -0,0 +1,13 @@ +package org.jenkinsci.plugins.workflow.graph; + +import com.google.common.base.Predicate; + +import javax.annotation.Nonnull; +import java.util.Iterator; + +/** Iterator that exposes filtering */ +public interface Filterator extends Iterator { + /** Returns a filtered view of an iterable */ + @Nonnull + public Filterator filter(@Nonnull Predicate matchCondition); +} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/FilteratorImpl.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/FilteratorImpl.java new file mode 100644 index 00000000..e551bf95 --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graph/FilteratorImpl.java @@ -0,0 +1,65 @@ +package org.jenkinsci.plugins.workflow.graph; + +import com.google.common.base.Predicate; + +import javax.annotation.Nonnull; +import java.util.Iterator; + +/** Filters an iterator against a match predicate */ +public class FilteratorImpl implements Filterator { + boolean hasNext = false; + T nextVal; + Iterator wrapped; + Predicate matchCondition; + + public FilteratorImpl filter(Predicate matchCondition) { + return new FilteratorImpl(this, matchCondition); + } + + public FilteratorImpl(@Nonnull Iterator it, @Nonnull Predicate matchCondition) { + this.wrapped = it; + this.matchCondition = matchCondition; + + while(it.hasNext()) { + T val = it.next(); + if (matchCondition.apply(val)) { + this.nextVal = val; + hasNext = true; + break; + } + } + } + + @Override + public boolean hasNext() { + return hasNext; + } + + @Override + public T next() { + T returnVal = nextVal; + T nextMatch = null; + + boolean foundMatch = false; + 
while(wrapped.hasNext()) { + nextMatch = wrapped.next(); + if (matchCondition.apply(nextMatch)) { + foundMatch = true; + break; + } + } + if (foundMatch) { + this.nextVal = nextMatch; + this.hasNext = true; + } else { + this.nextVal = null; + this.hasNext = false; + } + return returnVal; + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } +} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowNodeVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowNodeVisitor.java new file mode 100644 index 00000000..be32900b --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowNodeVisitor.java @@ -0,0 +1,16 @@ +package org.jenkinsci.plugins.workflow.graph; + +import javax.annotation.Nonnull; + +/** + * Interface used when examining a pipeline FlowNode graph + */ +public interface FlowNodeVisitor { + /** + * Visit the flow node, and indicate if we should continue analysis + * + * @param f Node to visit + * @return False if we should stop visiting nodes + */ + public boolean visit(@Nonnull FlowNode f); +} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java index 6b9cef44..35f92373 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java @@ -40,7 +40,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -80,122 +79,19 @@ public boolean apply(FlowNode input) { public static final Predicate MATCH_HAS_LOG = nodeHasActionPredicate(LogAction.class); public static final Predicate MATCH_BLOCK_START = (Predicate)Predicates.instanceOf(BlockStartNode.class); - public interface FlowNodeVisitor { - /** - * Visit the flow node, and indicate if we should continue 
analysis - * @param f Node to visit - * @return False if node is done - */ - public boolean visit(@Nonnull FlowNode f); - } - - /** Interface to be used for scanning/analyzing FlowGraphs with support for different visit orders - */ - public interface ScanAlgorithm { - - /** - * Search for first node (walking from the heads through parents) that matches the condition - * @param heads Nodes to start searching from, which may be filtered against blackList - * @param stopNodes Search doesn't go beyond any of these nodes, null or empty will run to end of flow - * @param matchPredicate Matching condition for search - * @return First node matching condition, or null if none found - */ - @CheckForNull - public FlowNode findFirstMatch(@CheckForNull Collection heads, @CheckForNull Collection stopNodes, @Nonnull Predicate matchPredicate); - - /** - * Search for first node (walking from the heads through parents) that matches the condition - * @param heads Nodes to start searching from, which may be filtered against a blackList - * @param stopNodes Search doesn't go beyond any of these nodes, null or empty will run to end of flow - * @param matchPredicate Matching condition for search - * @return All nodes matching condition - */ - @Nonnull - public Collection filteredNodes(@CheckForNull Collection heads, @CheckForNull Collection stopNodes, @Nonnull Predicate matchPredicate); - - /** Used for extracting metrics from the flow graph */ - public void visitAll(@CheckForNull Collection heads, FlowNodeVisitor visitor); - } - public static Filterator filterableEnclosingBlocks(FlowNode f) { LinearBlockHoppingScanner scanner = new LinearBlockHoppingScanner(); scanner.setup(f); return scanner.filter(MATCH_BLOCK_START); } - /** Iterator that exposes filtering */ - public interface Filterator extends Iterator { - /** Returns a filtered view of an iterable */ - @Nonnull - public Filterator filter(@Nonnull Predicate matchCondition); - } - - /** Filters an iterator against a match predicate 
*/ - public static class FilteratorImpl implements Filterator { - boolean hasNext = false; - T nextVal; - Iterator wrapped; - Predicate matchCondition; - - public FilteratorImpl filter(Predicate matchCondition) { - return new FilteratorImpl(this, matchCondition); - } - - public FilteratorImpl(@Nonnull Iterator it, @Nonnull Predicate matchCondition) { - this.wrapped = it; - this.matchCondition = matchCondition; - - while(it.hasNext()) { - T val = it.next(); - if (matchCondition.apply(val)) { - this.nextVal = val; - hasNext = true; - break; - } - } - } - - @Override - public boolean hasNext() { - return hasNext; - } - - @Override - public T next() { - T returnVal = nextVal; - T nextMatch = null; - - boolean foundMatch = false; - while(wrapped.hasNext()) { - nextMatch = wrapped.next(); - if (matchCondition.apply(nextMatch)) { - foundMatch = true; - break; - } - } - if (foundMatch) { - this.nextVal = nextMatch; - this.hasNext = true; - } else { - this.nextVal = null; - this.hasNext = false; - } - return returnVal; - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - } - /** * Base class for flow scanners, which offers basic methods and stubs for algorithms * Scanners store state internally, and are not thread-safe but are reusable * Scans/analysis of graphs is implemented via internal iteration to allow reusing algorithm bodies * However internal iteration has access to additional information */ - public static abstract class AbstractFlowScanner implements ScanAlgorithm, Iterable , Filterator { + public static abstract class AbstractFlowScanner implements Iterable , Filterator { // State variables, not all need be used protected ArrayDeque _queue; @@ -206,36 +102,17 @@ public static abstract class AbstractFlowScanner implements ScanAlgorithm, Itera protected Collection _blackList = Collections.EMPTY_SET; - @Override - public boolean hasNext() { - return _next != null; - } - - @Override - public FlowNode next() { - if (_next == 
null) { - throw new NoSuchElementException(); + /** Convert stop nodes to a collection that can efficiently be checked for membership, handling null if needed */ + @Nonnull + protected Collection convertToFastCheckable(@CheckForNull Collection nodeCollection) { + if (nodeCollection == null || nodeCollection.size()==0) { + return Collections.EMPTY_SET; + } else if (nodeCollection.size() == 1) { + return Collections.singleton(nodeCollection.iterator().next()); + } else if (nodeCollection instanceof Set) { + return nodeCollection; } - - // For computing timings and changes, it may be helpful to keep the previous result - // by creating a variable _last and storing _current to it. - -// System.out.println("Current iterator val: " + ((_current == null) ? "null" : _current.getId())); -// System.out.println("Next iterator val: " + ((_next == null) ? "null" : _next.getId())); - _current = _next; - _next = next(_blackList); -// System.out.println("New next val: " + ((_next == null) ? "null" : _next.getId())); - return _current; - } - - @Override - public void remove() { - throw new UnsupportedOperationException("FlowGraphs are immutable, so FlowScanners can't remove nodes"); - } - - @Override - public Iterator iterator() { - return this; + return nodeCollection.size() > 5 ? 
new HashSet(nodeCollection) : nodeCollection; } /** @@ -292,53 +169,47 @@ public boolean setup(@CheckForNull FlowNode head) { /** * Actual meat of the iteration, get the next node to visit, using & updating state as needed + * @param current Current node to use in generating next value * @param blackList Nodes that are not eligible for visiting * @return Next node to visit, or null if we've exhausted the node list */ @CheckForNull - protected abstract FlowNode next(@Nonnull Collection blackList); + protected abstract FlowNode next(@Nonnull FlowNode current, @Nonnull Collection blackList); - /** Convert stop nodes to a collection that can efficiently be checked for membership, handling nulls if needed */ - @Nonnull - protected Collection convertToFastCheckable(@CheckForNull Collection nodeCollection) { - if (nodeCollection == null || nodeCollection.size()==0) { - return Collections.EMPTY_SET; - } else if (nodeCollection.size() == 1) { - return Collections.singleton(nodeCollection.iterator().next()); - } else if (nodeCollection instanceof Set) { - return nodeCollection; - } - return nodeCollection.size() > 5 ? new HashSet(nodeCollection) : nodeCollection; + @Override + public boolean hasNext() { + return _next != null; } - // Polymorphic methods for syntactic sugar + @Override + public FlowNode next() { + if (_next == null) { + throw new NoSuchElementException(); + } - @CheckForNull - public FlowNode findFirstMatch(@CheckForNull Collection heads, @Nonnull Predicate matchPredicate) { - return this.findFirstMatch(heads, null, matchPredicate); - } + // For computing timings and changes, it may be helpful to keep the previous result + // by creating a variable _last and storing _current to it. - @CheckForNull - public FlowNode findFirstMatch(@CheckForNull FlowNode head, @Nonnull Predicate matchPredicate) { - return this.findFirstMatch(Collections.singleton(head), null, matchPredicate); +// System.out.println("Current iterator val: " + ((_current == null) ? 
"null" : _current.getId())); +// System.out.println("Next iterator val: " + ((_next == null) ? "null" : _next.getId())); + _current = _next; + _next = next(_current, _blackList); +// System.out.println("New next val: " + ((_next == null) ? "null" : _next.getId())); + return _current; } - @CheckForNull - public FlowNode findFirstMatch(@CheckForNull FlowExecution exec, @Nonnull Predicate matchPredicate) { - if (exec != null && exec.getCurrentHeads() != null) { - return this.findFirstMatch(exec.getCurrentHeads(), null, matchPredicate); - } - return null; + @Override + public void remove() { + throw new UnsupportedOperationException("FlowGraphs are immutable, so FlowScanners can't remove nodes"); } - @Nonnull - public List filteredNodes(@CheckForNull Collection heads, @Nonnull Predicate matchPredicate) { - return this.filteredNodes(heads, null, matchPredicate); + @Override + public Iterator iterator() { + return this; } - @Nonnull - public List filteredNodes(@CheckForNull FlowNode head, @Nonnull Predicate matchPredicate) { - return this.filteredNodes(Collections.singleton(head), null, matchPredicate); + public Filterator filter(Predicate filterCondition) { + return new FilteratorImpl(this, filterCondition); } // Basic algo impl @@ -357,6 +228,26 @@ public FlowNode findFirstMatch(@CheckForNull Collection heads, return null; } + // Polymorphic methods for syntactic sugar + + @CheckForNull + public FlowNode findFirstMatch(@CheckForNull Collection heads, @Nonnull Predicate matchPredicate) { + return this.findFirstMatch(heads, null, matchPredicate); + } + + @CheckForNull + public FlowNode findFirstMatch(@CheckForNull FlowNode head, @Nonnull Predicate matchPredicate) { + return this.findFirstMatch(Collections.singleton(head), null, matchPredicate); + } + + @CheckForNull + public FlowNode findFirstMatch(@CheckForNull FlowExecution exec, @Nonnull Predicate matchPredicate) { + if (exec != null && exec.getCurrentHeads() != null) { + return 
this.findFirstMatch(exec.getCurrentHeads(), null, matchPredicate); + } + return null; + } + // Basic algo impl @Nonnull public List filteredNodes(@CheckForNull Collection heads, @@ -375,10 +266,17 @@ public List filteredNodes(@CheckForNull Collection heads, return nodes; } - public Filterator filter(Predicate filterCondition) { - return new FilteratorImpl(this, filterCondition); + @Nonnull + public List filteredNodes(@CheckForNull Collection heads, @Nonnull Predicate matchPredicate) { + return this.filteredNodes(heads, null, matchPredicate); + } + + @Nonnull + public List filteredNodes(@CheckForNull FlowNode head, @Nonnull Predicate matchPredicate) { + return this.filteredNodes(Collections.singleton(head), null, matchPredicate); } + /** Used for extracting metrics from the flow graph */ @Nonnull public void visitAll(@CheckForNull Collection heads, FlowNodeVisitor visitor) { @@ -425,11 +323,11 @@ protected void setHeads(@Nonnull Collection heads) { } @Override - protected FlowNode next(@Nonnull Collection blackList) { + protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection blackList) { FlowNode output = null; // Walk through parents of current node - if (_current != null) { - List parents = _current.getParents(); + if (current != null) { + List parents = current.getParents(); if (parents != null) { for (FlowNode f : parents) { if (!blackList.contains(f) && !_visited.contains(f)) { @@ -473,11 +371,11 @@ protected void setHeads(@Nonnull Collection heads) { } @Override - protected FlowNode next(@Nonnull Collection blackList) { - if (_current == null) { + protected FlowNode next(FlowNode current, @Nonnull Collection blackList) { + if (current == null) { return null; } - List parents = _current.getParents(); + List parents = current.getParents(); if (parents != null && parents.size() > 0) { for (FlowNode f : parents) { if (!blackList.contains(f)) { @@ -545,11 +443,11 @@ protected FlowNode jumpBlockScan(@CheckForNull FlowNode node, @Nonnull Collectio } 
@Override - protected FlowNode next(@Nonnull Collection blackList) { - if (_current == null) { + protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection blackList) { + if (current == null) { return null; } - List parents = _current.getParents(); + List parents = current.getParents(); if (parents != null && parents.size() > 0) { for (FlowNode f : parents) { if (!blackList.contains(f)) { @@ -663,12 +561,12 @@ protected FlowNode hitParallelStart() { } @Override - protected FlowNode next(@Nonnull Collection blackList) { + protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection blackList) { FlowNode output = null; // First we look at the parents of the current node if present - if (_current != null) { - List parents = _current.getParents(); + if (current != null) { + List parents = current.getParents(); if (parents == null || parents.size() == 0) { // welp done with this node, guess we consult the queue? } else if (parents.size() == 1) { @@ -682,9 +580,9 @@ protected FlowNode next(@Nonnull Collection blackList) { } else if (!blackList.contains(p)) { return p; } - } else if (_current instanceof BlockEndNode && parents.size() > 1) { + } else if (current instanceof BlockEndNode && parents.size() > 1) { // We must be a BlockEndNode that begins this - BlockEndNode end = ((BlockEndNode) _current); + BlockEndNode end = ((BlockEndNode) current); FlowNode possibleOutput = hitParallelEnd(end, parents, blackList); // What if output is block but other branches aren't? 
if (possibleOutput != null) { return possibleOutput; diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java index 0488d627..396e77ed 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java @@ -38,6 +38,7 @@ import org.junit.Test; import org.jvnet.hudson.test.BuildWatcher; import org.jvnet.hudson.test.JenkinsRule; +import org.jenkinsci.plugins.workflow.graph.FlowScanner.AbstractFlowScanner; import javax.annotation.Nonnull; import java.util.ArrayList; @@ -70,7 +71,7 @@ public boolean apply(FlowNode input) { Predicate MATCH_ECHO_STEP = predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); - static final class CollectingVisitor implements FlowScanner.FlowNodeVisitor { + static final class CollectingVisitor implements FlowNodeVisitor { ArrayList visited = new ArrayList(); @Override @@ -108,7 +109,7 @@ public void testSimpleScan() throws Exception { WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); FlowExecution exec = b.getExecution(); - FlowScanner.AbstractFlowScanner[] scans = {new FlowScanner.LinearScanner(), + AbstractFlowScanner[] scans = {new FlowScanner.LinearScanner(), new FlowScanner.DepthFirstScanner(), new FlowScanner.ForkScanner() }; @@ -116,7 +117,7 @@ public void testSimpleScan() throws Exception { List heads = exec.getCurrentHeads(); // Iteration tests - for (FlowScanner.AbstractFlowScanner scan : scans) { + for (AbstractFlowScanner scan : scans) { System.out.println("Iteration test with scanner: "+scan.getClass()); scan.setup(heads, null); @@ -141,7 +142,7 @@ public void testSimpleScan() throws Exception { Assert.assertEquals(exec.getNode("2"), collectedNodes.get(3)); // Test expected scans with no stop nodes given (different ways of specifying none) - for (FlowScanner.ScanAlgorithm sa : scans) { + for 
(AbstractFlowScanner sa : scans) { System.out.println("Testing class: "+sa.getClass()); FlowNode node = sa.findFirstMatch(heads, null, MATCH_ECHO_STEP); Assert.assertEquals(exec.getNode("5"), node); @@ -160,7 +161,7 @@ public void testSimpleScan() throws Exception { } // Test with no matches - for (FlowScanner.ScanAlgorithm sa : scans) { + for (AbstractFlowScanner sa : scans) { System.out.println("Testing class: "+sa.getClass()); FlowNode node = sa.findFirstMatch(heads, null, (Predicate)Predicates.alwaysFalse()); Assert.assertNull(node); @@ -173,7 +174,7 @@ public void testSimpleScan() throws Exception { CollectingVisitor vis = new CollectingVisitor(); // Verify we touch head and foot nodes too - for (FlowScanner.ScanAlgorithm sa : scans) { + for (AbstractFlowScanner sa : scans) { System.out.println("Testing class: " + sa.getClass()); Collection nodeList = sa.filteredNodes(heads, null, (Predicate) Predicates.alwaysTrue()); vis.reset(); @@ -185,7 +186,7 @@ public void testSimpleScan() throws Exception { // Test with a stop node given, sometimes no matches Collection noMatchEndNode = Collections.singleton(exec.getNode("5")); Collection singleMatchEndNode = Collections.singleton(exec.getNode("4")); - for (FlowScanner.ScanAlgorithm sa : scans) { + for (AbstractFlowScanner sa : scans) { FlowNode node = sa.findFirstMatch(heads, noMatchEndNode, MATCH_ECHO_STEP); Assert.assertNull(node); @@ -338,7 +339,7 @@ public void testParallelScan() throws Exception { FlowExecution exec = b.getExecution(); Collection heads = b.getExecution().getCurrentHeads(); - FlowScanner.AbstractFlowScanner scanner = new FlowScanner.LinearScanner(); + AbstractFlowScanner scanner = new FlowScanner.LinearScanner(); Collection matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); Assert.assertTrue(matches.size() == 3 || matches.size() == 4); // Depending on ordering From f4ebc6e33e1b7db78613d7aa834ee6238244d520 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 3 May 2016 15:16:44 -0400 
Subject: [PATCH 024/104] Massive refactor: give graph analysis its own package, and split the different flow scanners into individual classes --- .../plugins/workflow/graph/Filterator.java | 13 - .../workflow/graph/FlowNodeVisitor.java | 16 - .../plugins/workflow/graph/FlowScanner.java | 602 ------------------ .../graphanalysis/AbstractFlowScanner.java | 250 ++++++++ .../graphanalysis/DepthFirstScanner.java | 92 +++ .../workflow/graphanalysis/Filterator.java | 39 ++ .../FilteratorImpl.java | 30 +- .../graphanalysis/FlowNodeVisitor.java | 45 ++ .../graphanalysis/FlowScanningUtils.java | 80 +++ .../workflow/graphanalysis/ForkScanner.java | 176 +++++ .../LinearBlockHoppingScanner.java | 106 +++ .../workflow/graphanalysis/LinearScanner.java | 71 +++ .../workflow/graph/TestFlowScanner.java | 33 +- 13 files changed, 906 insertions(+), 647 deletions(-) delete mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graph/Filterator.java delete mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graph/FlowNodeVisitor.java delete mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java rename src/main/java/org/jenkinsci/plugins/workflow/{graph => graphanalysis}/FilteratorImpl.java (53%) create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java create mode 100644 
src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/Filterator.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/Filterator.java deleted file mode 100644 index ed46b628..00000000 --- a/src/main/java/org/jenkinsci/plugins/workflow/graph/Filterator.java +++ /dev/null @@ -1,13 +0,0 @@ -package org.jenkinsci.plugins.workflow.graph; - -import com.google.common.base.Predicate; - -import javax.annotation.Nonnull; -import java.util.Iterator; - -/** Iterator that exposes filtering */ -public interface Filterator extends Iterator { - /** Returns a filtered view of an iterable */ - @Nonnull - public Filterator filter(@Nonnull Predicate matchCondition); -} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowNodeVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowNodeVisitor.java deleted file mode 100644 index be32900b..00000000 --- a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowNodeVisitor.java +++ /dev/null @@ -1,16 +0,0 @@ -package org.jenkinsci.plugins.workflow.graph; - -import javax.annotation.Nonnull; - -/** - * Interface used when examining a pipeline FlowNode graph - */ -public interface FlowNodeVisitor { - /** - * Visit the flow node, and indicate if we should continue analysis - * - * @param f Node to visit - * @return False if we should stop visiting nodes - */ - public boolean visit(@Nonnull FlowNode f); -} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java deleted file mode 100644 index 35f92373..00000000 --- a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowScanner.java +++ /dev/null @@ -1,602 +0,0 @@ -package org.jenkinsci.plugins.workflow.graph; - -/* - * The MIT License - * - * Copyright (c) 2016, CloudBees, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ - -import com.google.common.base.Predicate; -import com.google.common.base.Predicates; -import hudson.model.Action; -import org.jenkinsci.plugins.workflow.actions.ErrorAction; -import org.jenkinsci.plugins.workflow.actions.LabelAction; -import org.jenkinsci.plugins.workflow.actions.LogAction; -import org.jenkinsci.plugins.workflow.actions.StageAction; -import org.jenkinsci.plugins.workflow.actions.WorkspaceAction; -import org.jenkinsci.plugins.workflow.flow.FlowExecution; - -import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; -import java.util.ArrayDeque; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.NoSuchElementException; -import java.util.Set; - -/** - * Generified algorithms for scanning pipeline flow graphs for information - * Supports a variety of algorithms for searching, and pluggable conditions - * Worth noting: predicates may be stateful here, and may see some or all of the nodes, depending on the scan method used. 
- * - * @author Sam Van Oort - */ -public class FlowScanner { - - /** - * Create a predicate that will match on all FlowNodes having a specific action present - * @param actionClass Action class to look for - * @param Action type - * @return Predicate that will match when FlowNode has the action given - */ - @Nonnull - public static Predicate nodeHasActionPredicate(@Nonnull final Class actionClass) { - return new Predicate() { - @Override - public boolean apply(FlowNode input) { - return (input != null && input.getAction(actionClass) != null); - } - }; - } - - // Default predicates - public static final Predicate MATCH_HAS_LABEL = nodeHasActionPredicate(LabelAction.class); - public static final Predicate MATCH_IS_STAGE = nodeHasActionPredicate(StageAction.class); - public static final Predicate MATCH_HAS_WORKSPACE = nodeHasActionPredicate(WorkspaceAction.class); - public static final Predicate MATCH_HAS_ERROR = nodeHasActionPredicate(ErrorAction.class); - public static final Predicate MATCH_HAS_LOG = nodeHasActionPredicate(LogAction.class); - public static final Predicate MATCH_BLOCK_START = (Predicate)Predicates.instanceOf(BlockStartNode.class); - - public static Filterator filterableEnclosingBlocks(FlowNode f) { - LinearBlockHoppingScanner scanner = new LinearBlockHoppingScanner(); - scanner.setup(f); - return scanner.filter(MATCH_BLOCK_START); - } - - /** - * Base class for flow scanners, which offers basic methods and stubs for algorithms - * Scanners store state internally, and are not thread-safe but are reusable - * Scans/analysis of graphs is implemented via internal iteration to allow reusing algorithm bodies - * However internal iteration has access to additional information - */ - public static abstract class AbstractFlowScanner implements Iterable , Filterator { - - // State variables, not all need be used - protected ArrayDeque _queue; - - protected FlowNode _current; - - protected FlowNode _next; - - protected Collection _blackList = 
Collections.EMPTY_SET; - - /** Convert stop nodes to a collection that can efficiently be checked for membership, handling null if needed */ - @Nonnull - protected Collection convertToFastCheckable(@CheckForNull Collection nodeCollection) { - if (nodeCollection == null || nodeCollection.size()==0) { - return Collections.EMPTY_SET; - } else if (nodeCollection.size() == 1) { - return Collections.singleton(nodeCollection.iterator().next()); - } else if (nodeCollection instanceof Set) { - return nodeCollection; - } - return nodeCollection.size() > 5 ? new HashSet(nodeCollection) : nodeCollection; - } - - /** - * Set up for iteration/analysis on a graph of nodes, initializing the internal state - * @param heads The head nodes we start walking from (the most recently executed nodes, - * i.e. FlowExecution.getCurrentHeads() - * @param blackList Nodes that we cannot visit or walk past (useful to limit scanning to only nodes after a specific point) - * @return True if we can have nodes to work with, otherwise false - */ - public boolean setup(@CheckForNull Collection heads, @CheckForNull Collection blackList) { - if (heads == null || heads.size() == 0) { - return false; - } - Collection fastEndNodes = convertToFastCheckable(blackList); - HashSet filteredHeads = new HashSet(heads); - filteredHeads.removeAll(fastEndNodes); - - if (filteredHeads.size() == 0) { - return false; - } - - reset(); - _blackList = fastEndNodes; - setHeads(filteredHeads); - return true; - } - - /** - * Set up for iteration/analysis on a graph of nodes, initializing the internal state - * @param head The head FlowNode to start walking back from - * @param blackList Nodes that we cannot visit or walk past (useful to limit scanning to only nodes after a specific point) - * null or empty collection means none - * @return True if we can have nodes to work with, otherwise false - */ - public boolean setup(@CheckForNull FlowNode head, @CheckForNull Collection blackList) { - if (head == null) { - return 
false; - } - return setup(Collections.singleton(head), blackList); - } - - public boolean setup(@CheckForNull FlowNode head) { - if (head == null) { - return false; - } - return setup(Collections.singleton(head), Collections.EMPTY_SET); - } - - /** Public APIs need to invoke this before searches */ - protected abstract void reset(); - - /** Add current head nodes to current processing set, after filtering by blackList */ - protected abstract void setHeads(@Nonnull Collection filteredHeads); - - /** - * Actual meat of the iteration, get the next node to visit, using & updating state as needed - * @param current Current node to use in generating next value - * @param blackList Nodes that are not eligible for visiting - * @return Next node to visit, or null if we've exhausted the node list - */ - @CheckForNull - protected abstract FlowNode next(@Nonnull FlowNode current, @Nonnull Collection blackList); - - @Override - public boolean hasNext() { - return _next != null; - } - - @Override - public FlowNode next() { - if (_next == null) { - throw new NoSuchElementException(); - } - - // For computing timings and changes, it may be helpful to keep the previous result - // by creating a variable _last and storing _current to it. - -// System.out.println("Current iterator val: " + ((_current == null) ? "null" : _current.getId())); -// System.out.println("Next iterator val: " + ((_next == null) ? "null" : _next.getId())); - _current = _next; - _next = next(_current, _blackList); -// System.out.println("New next val: " + ((_next == null) ? 
"null" : _next.getId())); - return _current; - } - - @Override - public void remove() { - throw new UnsupportedOperationException("FlowGraphs are immutable, so FlowScanners can't remove nodes"); - } - - @Override - public Iterator iterator() { - return this; - } - - public Filterator filter(Predicate filterCondition) { - return new FilteratorImpl(this, filterCondition); - } - - // Basic algo impl - public FlowNode findFirstMatch(@CheckForNull Collection heads, - @CheckForNull Collection endNodes, - Predicate matchCondition) { - if (!setup(heads, endNodes)) { - return null; - } - - for (FlowNode f : this) { - if (matchCondition.apply(f)) { - return f; - } - } - return null; - } - - // Polymorphic methods for syntactic sugar - - @CheckForNull - public FlowNode findFirstMatch(@CheckForNull Collection heads, @Nonnull Predicate matchPredicate) { - return this.findFirstMatch(heads, null, matchPredicate); - } - - @CheckForNull - public FlowNode findFirstMatch(@CheckForNull FlowNode head, @Nonnull Predicate matchPredicate) { - return this.findFirstMatch(Collections.singleton(head), null, matchPredicate); - } - - @CheckForNull - public FlowNode findFirstMatch(@CheckForNull FlowExecution exec, @Nonnull Predicate matchPredicate) { - if (exec != null && exec.getCurrentHeads() != null) { - return this.findFirstMatch(exec.getCurrentHeads(), null, matchPredicate); - } - return null; - } - - // Basic algo impl - @Nonnull - public List filteredNodes(@CheckForNull Collection heads, - @CheckForNull Collection endNodes, - Predicate matchCondition) { - if (!setup(heads, endNodes)) { - return Collections.EMPTY_LIST; - } - - ArrayList nodes = new ArrayList(); - for (FlowNode f : this) { - if (matchCondition.apply(f)) { - nodes.add(f); - } - } - return nodes; - } - - @Nonnull - public List filteredNodes(@CheckForNull Collection heads, @Nonnull Predicate matchPredicate) { - return this.filteredNodes(heads, null, matchPredicate); - } - - @Nonnull - public List filteredNodes(@CheckForNull 
FlowNode head, @Nonnull Predicate matchPredicate) { - return this.filteredNodes(Collections.singleton(head), null, matchPredicate); - } - - - /** Used for extracting metrics from the flow graph */ - @Nonnull - public void visitAll(@CheckForNull Collection heads, FlowNodeVisitor visitor) { - if (!setup(heads, Collections.EMPTY_SET)) { - return; - } - for (FlowNode f : this) { - boolean canContinue = visitor.visit(f); - if (!canContinue) { - break; - } - } - } - } - - /** Does a simple and efficient depth-first search: - * - This will visit each node exactly once, and walks through the first ancestry before revisiting parallel branches - */ - public static class DepthFirstScanner extends AbstractFlowScanner { - - protected HashSet _visited = new HashSet(); - - protected void reset() { - if (this._queue == null) { - this._queue = new ArrayDeque(); - } else { - this._queue.clear(); - } - this._visited.clear(); - this._current = null; - } - - @Override - protected void setHeads(@Nonnull Collection heads) { - Iterator it = heads.iterator(); - if (it.hasNext()) { - FlowNode f = it.next(); - _current = f; - _next = f; - } - while (it.hasNext()) { - _queue.add(it.next()); - } - } - - @Override - protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection blackList) { - FlowNode output = null; - // Walk through parents of current node - if (current != null) { - List parents = current.getParents(); - if (parents != null) { - for (FlowNode f : parents) { - if (!blackList.contains(f) && !_visited.contains(f)) { - if (output == null ) { - output = f; - } else { - _queue.push(f); - } - } - } - } - } - - if (output == null && _queue.size() > 0) { - output = _queue.pop(); - } - _visited.add(output); // No-op if null - return output; - } - } - - /** - * Scans through a single ancestry, does not cover parallel branches - * Use case: we don't care about parallel branches - */ - public static class LinearScanner extends AbstractFlowScanner { - - @Override - protected void 
reset() { - this._current = null; - this._next = null; - this._blackList = Collections.EMPTY_SET; - } - - @Override - protected void setHeads(@Nonnull Collection heads) { - if (heads.size() > 0) { - this._current = heads.iterator().next(); - this._next = this._current; - } - } - - @Override - protected FlowNode next(FlowNode current, @Nonnull Collection blackList) { - if (current == null) { - return null; - } - List parents = current.getParents(); - if (parents != null && parents.size() > 0) { - for (FlowNode f : parents) { - if (!blackList.contains(f)) { - return f; - } - } - } - return null; - } - } - - /** - * LinearScanner that jumps over nested blocks - * Use case: finding information about enclosing blocks or preceding nodes - * - Ex: finding out the executor workspace used to run a flownode - * Caveats: - * - If you start on the last node of a completed flow, it will jump straight to start (by design) - * - Will only consider the first branch in a parallel case - */ - public static class LinearBlockHoppingScanner extends LinearScanner { - - @Override - public boolean setup(@CheckForNull Collection heads, @CheckForNull Collection blackList) { - boolean possiblyStartable = super.setup(heads, blackList); - return possiblyStartable && _current != null; // In case we start at an end block - } - - @Override - protected void setHeads(@Nonnull Collection heads) { - if (heads.size() > 0) { - this._current = jumpBlockScan(heads.iterator().next(), _blackList); - this._next = this._current; - } - } - - /** Keeps jumping over blocks until we hit the first node preceding a block */ - @CheckForNull - protected FlowNode jumpBlockScan(@CheckForNull FlowNode node, @Nonnull Collection blacklistNodes) { - FlowNode candidate = node; - - // Find the first candidate node preceding a block... 
and filtering by blacklist - while (candidate != null && candidate instanceof BlockEndNode) { - candidate = ((BlockEndNode) candidate).getStartNode(); - if (blacklistNodes.contains(candidate)) { - return null; - } - List parents = candidate.getParents(); - if (parents == null || parents.size() == 0) { - return null; - } - boolean foundNode = false; - for (FlowNode f : parents) { - if (!blacklistNodes.contains(f)) { - candidate = f; // Loop again b/c could be BlockEndNode - foundNode = true; - break; - } - } - if (!foundNode) { - return null; - } - } - - return candidate; - } - - @Override - protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection blackList) { - if (current == null) { - return null; - } - List parents = current.getParents(); - if (parents != null && parents.size() > 0) { - for (FlowNode f : parents) { - if (!blackList.contains(f)) { - return (f instanceof BlockEndNode) ? jumpBlockScan(f, blackList) : f; - } - } - } - return null; - } - } - - /** - * Scanner that will scan down forks when we hit parallel blocks. - * Think of it as the opposite of {@link org.jenkinsci.plugins.workflow.graph.FlowScanner.DepthFirstScanner}: - * - We visit every node exactly once, but walk through all parallel forks before resuming the main flow - * - * This is near-optimal in many cases, since it keeps minimal state information and explores parallel blocks first - * It is also very easy to make it branch/block-aware, since we have all the fork information at all times. 
- */ - public static class ForkScanner extends AbstractFlowScanner { - - /** These are the BlockStartNodes that begin parallel blocks - * There will be one entry for every executing parallel branch in current flow - */ - ArrayDeque forkStarts = new ArrayDeque(); - - /** FlowNode that will terminate the current parallel block */ - FlowNode currentParallelStart = null; - - /** How deep are we in parallel branches, if 0 we are linear */ - protected int parallelDepth = 0; - - @Override - protected void reset() { - if (_queue == null) { - _queue = new ArrayDeque(); - } else { - _queue.clear(); - } - forkStarts.clear(); - parallelDepth =0; - currentParallelStart = null; - _current = null; - _next = null; - } - - @Override - protected void setHeads(@Nonnull Collection heads) { - if (heads.size() > 1) { - throw new IllegalArgumentException("ForkedFlowScanner can't handle multiple head nodes yet"); - // TODO We need to implement this using filterableEnclosingBlocks - // and add nodes to with the start of their parallel branches - } - _current = null; - _queue.addAll(heads); - _current = _queue.poll(); - _next = _current; - } - - /** - * Invoked when we start entering a parallel block (walking from head of the flow, so we see the block end first) - * @param endNode Node where parents merge (final end node for the parallel block) - * @param parents Parent nodes that end here - * @return FlowNode next node to visit - */ - protected FlowNode hitParallelEnd(BlockEndNode endNode, List parents, Collection blackList) { - int branchesAdded = 0; - BlockStartNode start = endNode.getStartNode(); - FlowNode output = null; - for (FlowNode f : parents) { - if (!blackList.contains(f)) { - if (branchesAdded == 0) { // We use references because it is more efficient - currentParallelStart = start; - output = f; - } else { - _queue.push(f); - forkStarts.push(start); - } - branchesAdded++; - } - } - if (branchesAdded > 0) { - parallelDepth++; - } - return output; - } - - /** - * Invoked when we 
complete parallel block, walking from the head (so encountered after the end) - * @return FlowNode if we're the last node - */ - protected FlowNode hitParallelStart() { - FlowNode output = null; - if (forkStarts.size() > 0) { // More forks (or nested parallel forks) remain - FlowNode end = forkStarts.peek(); - // Nested parallel branches, finished nested level so we visit the head and enclosing parallel block - if (end != currentParallelStart) { - parallelDepth--; - output = currentParallelStart; - } - - // If the current end == currentParallelStart then we are finishing another branch of current flow - currentParallelStart = end; - } else { // We're now at the top level of the flow, having finished our last (nested) parallel fork - output = currentParallelStart; - currentParallelStart = null; - parallelDepth--; - } - // Handle cases where the BlockStartNode for the parallel block is blackListed - return (output != null && !_blackList.contains(output)) ? output : null; - } - - @Override - protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection blackList) { - FlowNode output = null; - - // First we look at the parents of the current node if present - if (current != null) { - List parents = current.getParents(); - if (parents == null || parents.size() == 0) { - // welp done with this node, guess we consult the queue? - } else if (parents.size() == 1) { - FlowNode p = parents.get(0); - if (p == currentParallelStart) { - // Terminating a parallel scan - FlowNode temp = hitParallelStart(); - if (temp != null) { // Startnode for current parallel block now that it is done - return temp; - } - } else if (!blackList.contains(p)) { - return p; - } - } else if (current instanceof BlockEndNode && parents.size() > 1) { - // We must be a BlockEndNode that begins this - BlockEndNode end = ((BlockEndNode) current); - FlowNode possibleOutput = hitParallelEnd(end, parents, blackList); // What if output is block but other branches aren't? 
- if (possibleOutput != null) { - return possibleOutput; - } - } else { - throw new IllegalStateException("Found a FlowNode with multiple parents that isn't the end of a block! "+_current.toString()); - } - } - if (_queue.size() > 0) { - output = _queue.pop(); - currentParallelStart = forkStarts.pop(); - } - - return output; - } - } -} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java new file mode 100644 index 00000000..2f84a5d6 --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java @@ -0,0 +1,250 @@ +/* + * The MIT License + * + * Copyright (c) 2016, CloudBees, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +package org.jenkinsci.plugins.workflow.graphanalysis; + +import com.google.common.base.Predicate; +import org.jenkinsci.plugins.workflow.flow.FlowExecution; +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +import javax.annotation.CheckForNull; +import javax.annotation.Nonnull; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.Set; + +/** + * Base class for flow scanners, which offers basic methods and stubs for algorithms + * Scanners store state internally, and are not thread-safe but are reusable + * Scans/analysis of graphs is implemented via internal iteration to allow reusing algorithm bodies + * However internal iteration has access to additional information + * + * @author Sam Van Oort + */ +public abstract class AbstractFlowScanner implements Iterable , Filterator { + + // State variables, not all need be used + protected ArrayDeque _queue; + + protected FlowNode _current; + + protected FlowNode _next; + + protected Collection _blackList = Collections.EMPTY_SET; + + /** Convert stop nodes to a collection that can efficiently be checked for membership, handling null if needed */ + @Nonnull + protected Collection convertToFastCheckable(@CheckForNull Collection nodeCollection) { + if (nodeCollection == null || nodeCollection.size()==0) { + return Collections.EMPTY_SET; + } else if (nodeCollection.size() == 1) { + return Collections.singleton(nodeCollection.iterator().next()); + } else if (nodeCollection instanceof Set) { + return nodeCollection; + } + return nodeCollection.size() > 5 ? new HashSet(nodeCollection) : nodeCollection; + } + + /** + * Set up for iteration/analysis on a graph of nodes, initializing the internal state + * @param heads The head nodes we start walking from (the most recently executed nodes, + * i.e. 
FlowExecution.getCurrentHeads() + * @param blackList Nodes that we cannot visit or walk past (useful to limit scanning to only nodes after a specific point) + * @return True if we can have nodes to work with, otherwise false + */ + public boolean setup(@CheckForNull Collection heads, @CheckForNull Collection blackList) { + if (heads == null || heads.size() == 0) { + return false; + } + Collection fastEndNodes = convertToFastCheckable(blackList); + HashSet filteredHeads = new HashSet(heads); + filteredHeads.removeAll(fastEndNodes); + + if (filteredHeads.size() == 0) { + return false; + } + + reset(); + _blackList = fastEndNodes; + setHeads(filteredHeads); + return true; + } + + /** + * Set up for iteration/analysis on a graph of nodes, initializing the internal state + * @param head The head FlowNode to start walking back from + * @param blackList Nodes that we cannot visit or walk past (useful to limit scanning to only nodes after a specific point) + * null or empty collection means none + * @return True if we can have nodes to work with, otherwise false + */ + public boolean setup(@CheckForNull FlowNode head, @CheckForNull Collection blackList) { + if (head == null) { + return false; + } + return setup(Collections.singleton(head), blackList); + } + + public boolean setup(@CheckForNull FlowNode head) { + if (head == null) { + return false; + } + return setup(Collections.singleton(head), Collections.EMPTY_SET); + } + + /** Public APIs need to invoke this before searches */ + protected abstract void reset(); + + /** Add current head nodes to current processing set, after filtering by blackList */ + protected abstract void setHeads(@Nonnull Collection filteredHeads); + + /** + * Actual meat of the iteration, get the next node to visit, using & updating state as needed + * @param current Current node to use in generating next value + * @param blackList Nodes that are not eligible for visiting + * @return Next node to visit, or null if we've exhausted the node list + */ 
+ @CheckForNull + protected abstract FlowNode next(@Nonnull FlowNode current, @Nonnull Collection blackList); + + @Override + public boolean hasNext() { + return _next != null; + } + + @Override + public FlowNode next() { + if (_next == null) { + throw new NoSuchElementException(); + } + + // For computing timings and changes, it may be helpful to keep the previous result + // by creating a variable _last and storing _current to it. + +// System.out.println("Current iterator val: " + ((_current == null) ? "null" : _current.getId())); +// System.out.println("Next iterator val: " + ((_next == null) ? "null" : _next.getId())); + _current = _next; + _next = next(_current, _blackList); +// System.out.println("New next val: " + ((_next == null) ? "null" : _next.getId())); + return _current; + } + + @Override + public void remove() { + throw new UnsupportedOperationException("FlowGraphs are immutable, so FlowScanners can't remove nodes"); + } + + @Override + public Iterator iterator() { + return this; + } + + public Filterator filter(Predicate filterCondition) { + return new FilteratorImpl(this, filterCondition); + } + + // Basic algo impl + public FlowNode findFirstMatch(@CheckForNull Collection heads, + @CheckForNull Collection endNodes, + Predicate matchCondition) { + if (!setup(heads, endNodes)) { + return null; + } + + for (FlowNode f : this) { + if (matchCondition.apply(f)) { + return f; + } + } + return null; + } + + // Polymorphic methods for syntactic sugar + + @CheckForNull + public FlowNode findFirstMatch(@CheckForNull Collection heads, @Nonnull Predicate matchPredicate) { + return this.findFirstMatch(heads, null, matchPredicate); + } + + @CheckForNull + public FlowNode findFirstMatch(@CheckForNull FlowNode head, @Nonnull Predicate matchPredicate) { + return this.findFirstMatch(Collections.singleton(head), null, matchPredicate); + } + + @CheckForNull + public FlowNode findFirstMatch(@CheckForNull FlowExecution exec, @Nonnull Predicate matchPredicate) { + if 
(exec != null && exec.getCurrentHeads() != null) { + return this.findFirstMatch(exec.getCurrentHeads(), null, matchPredicate); + } + return null; + } + + // Basic algo impl + @Nonnull + public List filteredNodes(@CheckForNull Collection heads, + @CheckForNull Collection endNodes, + Predicate matchCondition) { + if (!setup(heads, endNodes)) { + return Collections.EMPTY_LIST; + } + + ArrayList nodes = new ArrayList(); + for (FlowNode f : this) { + if (matchCondition.apply(f)) { + nodes.add(f); + } + } + return nodes; + } + + @Nonnull + public List filteredNodes(@CheckForNull Collection heads, @Nonnull Predicate matchPredicate) { + return this.filteredNodes(heads, null, matchPredicate); + } + + @Nonnull + public List filteredNodes(@CheckForNull FlowNode head, @Nonnull Predicate matchPredicate) { + return this.filteredNodes(Collections.singleton(head), null, matchPredicate); + } + + + /** Used for extracting metrics from the flow graph */ + @Nonnull + public void visitAll(@CheckForNull Collection heads, FlowNodeVisitor visitor) { + if (!setup(heads, Collections.EMPTY_SET)) { + return; + } + for (FlowNode f : this) { + boolean canContinue = visitor.visit(f); + if (!canContinue) { + break; + } + } + } +} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java new file mode 100644 index 00000000..aa827827 --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java @@ -0,0 +1,92 @@ +/* + * The MIT License + * + * Copyright (c) 2016, CloudBees, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +package org.jenkinsci.plugins.workflow.graphanalysis; + +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +import javax.annotation.Nonnull; +import java.util.ArrayDeque; +import java.util.Collection; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; + +/** Does a simple and efficient depth-first search: + * - This will visit each node exactly once, and walks through the first ancestry before revisiting parallel branches + * @author Sam Van Oort + */ +public class DepthFirstScanner extends AbstractFlowScanner { + + protected HashSet _visited = new HashSet(); + + protected void reset() { + if (this._queue == null) { + this._queue = new ArrayDeque(); + } else { + this._queue.clear(); + } + this._visited.clear(); + this._current = null; + } + + @Override + protected void setHeads(@Nonnull Collection heads) { + Iterator it = heads.iterator(); + if (it.hasNext()) { + FlowNode f = it.next(); + _current = f; + _next = f; + } + while (it.hasNext()) { + _queue.add(it.next()); + } + } + + @Override + protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection blackList) { + FlowNode output = null; + // Walk through parents of current node + if (current != null) { + List parents = current.getParents(); + if (parents != null) { + for (FlowNode f : parents) { + if (!blackList.contains(f) && !_visited.contains(f)) { + if (output == null ) { + output = f; + } else { + _queue.push(f); + } + } + } + } + } + + if (output == null && _queue.size() > 0) { + output = _queue.pop(); + } + _visited.add(output); // No-op if null + return output; + } +} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java new file mode 100644 index 00000000..b7c766f4 --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java @@ -0,0 +1,39 @@ +/* + * The MIT License + * + * Copyright (c) 2016, CloudBees, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +package org.jenkinsci.plugins.workflow.graphanalysis; + +import com.google.common.base.Predicate; + +import javax.annotation.Nonnull; +import java.util.Iterator; + +/** Iterator that allows returned objects to be filtered against a given condition + * @author Sam Van Oort + */ +public interface Filterator extends Iterator { + /** Returns a filtered view of an iterable */ + @Nonnull + public Filterator filter(@Nonnull Predicate matchCondition); +} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/FilteratorImpl.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java similarity index 53% rename from src/main/java/org/jenkinsci/plugins/workflow/graph/FilteratorImpl.java rename to src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java index e551bf95..7ab78dee 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graph/FilteratorImpl.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java @@ -1,11 +1,37 @@ -package org.jenkinsci.plugins.workflow.graph; +/* + * The MIT License + * + * Copyright (c) 2016, CloudBees, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +package org.jenkinsci.plugins.workflow.graphanalysis; import com.google.common.base.Predicate; import javax.annotation.Nonnull; import java.util.Iterator; -/** Filters an iterator against a match predicate */ +/** Filters an iterator against a match predicate by wrapping an iterator + * @author Sam Van Oort + */ public class FilteratorImpl implements Filterator { boolean hasNext = false; T nextVal; diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java new file mode 100644 index 00000000..553ed36e --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java @@ -0,0 +1,45 @@ +/* + * The MIT License + * + * Copyright (c) 2016, CloudBees, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +package org.jenkinsci.plugins.workflow.graphanalysis; + +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +import javax.annotation.Nonnull; +import java.util.Collection; + +/** + * Interface used when examining a pipeline FlowNode graph node by node, and terminating when a condition is met + * This is intended to couple with {@link AbstractFlowScanner#visitAll(Collection, FlowNodeVisitor)} + * @author Sam Van Oort + */ +public interface FlowNodeVisitor { + /** + * Visit the flow node, and indicate if we should continue analysis + * + * @param f Node to visit + * @return False if we should stop visiting nodes + */ + public boolean visit(@Nonnull FlowNode f); +} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java new file mode 100644 index 00000000..d1053479 --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java @@ -0,0 +1,80 @@ +/* + * The MIT License + * + * Copyright (c) 2016, CloudBees, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +package org.jenkinsci.plugins.workflow.graphanalysis; + +import com.google.common.base.Predicate; +import com.google.common.base.Predicates; +import hudson.model.Action; +import org.jenkinsci.plugins.workflow.actions.ErrorAction; +import org.jenkinsci.plugins.workflow.actions.LabelAction; +import org.jenkinsci.plugins.workflow.actions.LogAction; +import org.jenkinsci.plugins.workflow.actions.StageAction; +import org.jenkinsci.plugins.workflow.actions.WorkspaceAction; +import org.jenkinsci.plugins.workflow.graph.BlockEndNode; +import org.jenkinsci.plugins.workflow.graph.BlockStartNode; +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +import javax.annotation.Nonnull; +import java.util.ArrayDeque; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +/** + * Library of common functionality when analyzing/walking flow graphs + * @author Sam Van Oort + */ +public class FlowScanningUtils { + + /** + * Create a predicate that will match on all FlowNodes having a specific action present + * @param actionClass Action class to look for + * @param Action type + * @return Predicate that will match when FlowNode has the action given + */ + @Nonnull + public static Predicate nodeHasActionPredicate(@Nonnull final Class actionClass) { + return new Predicate() { + @Override + public boolean apply(FlowNode input) { + return (input != null && input.getAction(actionClass) != null); + } + }; + } + + // Default predicates + public static final Predicate 
MATCH_HAS_LABEL = nodeHasActionPredicate(LabelAction.class); + public static final Predicate MATCH_IS_STAGE = nodeHasActionPredicate(StageAction.class); + public static final Predicate MATCH_HAS_WORKSPACE = nodeHasActionPredicate(WorkspaceAction.class); + public static final Predicate MATCH_HAS_ERROR = nodeHasActionPredicate(ErrorAction.class); + public static final Predicate MATCH_HAS_LOG = nodeHasActionPredicate(LogAction.class); + public static final Predicate MATCH_BLOCK_START = (Predicate)Predicates.instanceOf(BlockStartNode.class); + + public static Filterator filterableEnclosingBlocks(FlowNode f) { + LinearBlockHoppingScanner scanner = new LinearBlockHoppingScanner(); + scanner.setup(f); + return scanner.filter(MATCH_BLOCK_START); + } +} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java new file mode 100644 index 00000000..59bb19ec --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -0,0 +1,176 @@ +/* + * The MIT License + * + * Copyright (c) 2016, CloudBees, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +package org.jenkinsci.plugins.workflow.graphanalysis; + +import org.jenkinsci.plugins.workflow.graph.BlockEndNode; +import org.jenkinsci.plugins.workflow.graph.BlockStartNode; +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +import javax.annotation.Nonnull; +import java.util.ArrayDeque; +import java.util.Collection; +import java.util.List; + +/** + * Scanner that will scan down forks when we hit parallel blocks. + * Think of it as the opposite of {@link DepthFirstScanner}: + * - We visit every node exactly once, but walk through all parallel forks before resuming the main flow + * + * This is near-optimal in many cases, since it keeps minimal state information and explores parallel blocks first + * It is also very easy to make it branch/block-aware, since we have all the fork information at all times. 
+ * @author Sam Van Oort + */ +public class ForkScanner extends AbstractFlowScanner { + + /** These are the BlockStartNodes that begin parallel blocks + * There will be one entry for every executing parallel branch in current flow + */ + ArrayDeque forkStarts = new ArrayDeque(); + + /** FlowNode that will terminate the current parallel block */ + FlowNode currentParallelStart = null; + + /** How deep are we in parallel branches, if 0 we are linear */ + protected int parallelDepth = 0; + + @Override + protected void reset() { + if (_queue == null) { + _queue = new ArrayDeque(); + } else { + _queue.clear(); + } + forkStarts.clear(); + parallelDepth =0; + currentParallelStart = null; + _current = null; + _next = null; + } + + @Override + protected void setHeads(@Nonnull Collection heads) { + if (heads.size() > 1) { + throw new IllegalArgumentException("ForkedFlowScanner can't handle multiple head nodes yet"); + // TODO We need to implement this using filterableEnclosingBlocks + // and add nodes to with the start of their parallel branches + } + _current = null; + _queue.addAll(heads); + _current = _queue.poll(); + _next = _current; + } + + /** + * Invoked when we start entering a parallel block (walking from head of the flow, so we see the block end first) + * @param endNode Node where parents merge (final end node for the parallel block) + * @param parents Parent nodes that end here + * @return FlowNode next node to visit + */ + protected FlowNode hitParallelEnd(BlockEndNode endNode, List parents, Collection blackList) { + int branchesAdded = 0; + BlockStartNode start = endNode.getStartNode(); + FlowNode output = null; + for (FlowNode f : parents) { + if (!blackList.contains(f)) { + if (branchesAdded == 0) { // We use references because it is more efficient + currentParallelStart = start; + output = f; + } else { + _queue.push(f); + forkStarts.push(start); + } + branchesAdded++; + } + } + if (branchesAdded > 0) { + parallelDepth++; + } + return output; + } + + /** + 
* Invoked when we complete parallel block, walking from the head (so encountered after the end) + * @return FlowNode if we're the last node + */ + protected FlowNode hitParallelStart() { + FlowNode output = null; + if (forkStarts.size() > 0) { // More forks (or nested parallel forks) remain + FlowNode end = forkStarts.peek(); + // Nested parallel branches, finished nested level so we visit the head and enclosing parallel block + if (end != currentParallelStart) { + parallelDepth--; + output = currentParallelStart; + } + + // If the current end == currentParallelStart then we are finishing another branch of current flow + currentParallelStart = end; + } else { // We're now at the top level of the flow, having finished our last (nested) parallel fork + output = currentParallelStart; + currentParallelStart = null; + parallelDepth--; + } + // Handle cases where the BlockStartNode for the parallel block is blackListed + return (output != null && !_blackList.contains(output)) ? output : null; + } + + @Override + protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection blackList) { + FlowNode output = null; + + // First we look at the parents of the current node if present + if (current != null) { + List parents = current.getParents(); + if (parents == null || parents.size() == 0) { + // welp done with this node, guess we consult the queue? + } else if (parents.size() == 1) { + FlowNode p = parents.get(0); + if (p == currentParallelStart) { + // Terminating a parallel scan + FlowNode temp = hitParallelStart(); + if (temp != null) { // Startnode for current parallel block now that it is done + return temp; + } + } else if (!blackList.contains(p)) { + return p; + } + } else if (current instanceof BlockEndNode && parents.size() > 1) { + // We must be a BlockEndNode that begins this + BlockEndNode end = ((BlockEndNode) current); + FlowNode possibleOutput = hitParallelEnd(end, parents, blackList); // What if output is block but other branches aren't? 
+ if (possibleOutput != null) { + return possibleOutput; + } + } else { + throw new IllegalStateException("Found a FlowNode with multiple parents that isn't the end of a block! "+_current.toString()); + } + } + if (_queue.size() > 0) { + output = _queue.pop(); + currentParallelStart = forkStarts.pop(); + } + + return output; + } +} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java new file mode 100644 index 00000000..5f4754f9 --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java @@ -0,0 +1,106 @@ +/* + * The MIT License + * + * Copyright (c) 2016, CloudBees, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +package org.jenkinsci.plugins.workflow.graphanalysis; + +import org.jenkinsci.plugins.workflow.graph.BlockEndNode; +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +import javax.annotation.CheckForNull; +import javax.annotation.Nonnull; +import java.util.Collection; +import java.util.List; + +/** + * LinearScanner that jumps over nested blocks + * Use case: finding information about enclosing blocks or preceding nodes + * - Ex: finding out the executor workspace used to run a flownode + * Caveats: + * - If you start on the last node of a completed flow, it will jump straight to start (by design) + * - Will only consider the first branch in a parallel case + * @author Sam Van Oort + */ +public class LinearBlockHoppingScanner extends LinearScanner { + + @Override + public boolean setup(@CheckForNull Collection heads, @CheckForNull Collection blackList) { + boolean possiblyStartable = super.setup(heads, blackList); + return possiblyStartable && _current != null; // In case we start at an end block + } + + @Override + protected void setHeads(@Nonnull Collection heads) { + if (heads.size() > 0) { + this._current = jumpBlockScan(heads.iterator().next(), _blackList); + this._next = this._current; + } + } + + /** Keeps jumping over blocks until we hit the first node preceding a block */ + @CheckForNull + protected FlowNode jumpBlockScan(@CheckForNull FlowNode node, @Nonnull Collection blacklistNodes) { + FlowNode candidate = node; + + // Find the first candidate node preceding a block... 
and filtering by blacklist + while (candidate != null && candidate instanceof BlockEndNode) { + candidate = ((BlockEndNode) candidate).getStartNode(); + if (blacklistNodes.contains(candidate)) { + return null; + } + List parents = candidate.getParents(); + if (parents == null || parents.size() == 0) { + return null; + } + boolean foundNode = false; + for (FlowNode f : parents) { + if (!blacklistNodes.contains(f)) { + candidate = f; // Loop again b/c could be BlockEndNode + foundNode = true; + break; + } + } + if (!foundNode) { + return null; + } + } + + return candidate; + } + + @Override + protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection blackList) { + if (current == null) { + return null; + } + List parents = current.getParents(); + if (parents != null && parents.size() > 0) { + for (FlowNode f : parents) { + if (!blackList.contains(f)) { + return (f instanceof BlockEndNode) ? jumpBlockScan(f, blackList) : f; + } + } + } + return null; + } +} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java new file mode 100644 index 00000000..90e254af --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java @@ -0,0 +1,71 @@ +/* + * The MIT License + * + * Copyright (c) 2016, CloudBees, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +package org.jenkinsci.plugins.workflow.graphanalysis; + +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +import javax.annotation.Nonnull; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +/** + * Scans through a single ancestry, does not cover parallel branches + * Use case: we don't care about parallel branches + * @author Sam Van Oort + */ +public class LinearScanner extends AbstractFlowScanner { + + @Override + protected void reset() { + this._current = null; + this._next = null; + this._blackList = Collections.EMPTY_SET; + } + + @Override + protected void setHeads(@Nonnull Collection heads) { + if (heads.size() > 0) { + this._current = heads.iterator().next(); + this._next = this._current; + } + } + + @Override + protected FlowNode next(FlowNode current, @Nonnull Collection blackList) { + if (current == null) { + return null; + } + List parents = current.getParents(); + if (parents != null && parents.size() > 0) { + for (FlowNode f : parents) { + if (!blackList.contains(f)) { + return f; + } + } + } + return null; + } +} diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java index 396e77ed..76c08984 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java @@ -29,6 +29,11 @@ import 
org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition; import org.jenkinsci.plugins.workflow.cps.nodes.StepAtomNode; import org.jenkinsci.plugins.workflow.flow.FlowExecution; +import org.jenkinsci.plugins.workflow.graphanalysis.DepthFirstScanner; +import org.jenkinsci.plugins.workflow.graphanalysis.FlowNodeVisitor; +import org.jenkinsci.plugins.workflow.graphanalysis.ForkScanner; +import org.jenkinsci.plugins.workflow.graphanalysis.LinearBlockHoppingScanner; +import org.jenkinsci.plugins.workflow.graphanalysis.LinearScanner; import org.jenkinsci.plugins.workflow.job.WorkflowJob; import org.jenkinsci.plugins.workflow.job.WorkflowRun; import org.jenkinsci.plugins.workflow.steps.StepDescriptor; @@ -38,7 +43,7 @@ import org.junit.Test; import org.jvnet.hudson.test.BuildWatcher; import org.jvnet.hudson.test.JenkinsRule; -import org.jenkinsci.plugins.workflow.graph.FlowScanner.AbstractFlowScanner; +import org.jenkinsci.plugins.workflow.graphanalysis.AbstractFlowScanner; import javax.annotation.Nonnull; import java.util.ArrayList; @@ -109,9 +114,9 @@ public void testSimpleScan() throws Exception { WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); FlowExecution exec = b.getExecution(); - AbstractFlowScanner[] scans = {new FlowScanner.LinearScanner(), - new FlowScanner.DepthFirstScanner(), - new FlowScanner.ForkScanner() + AbstractFlowScanner[] scans = {new LinearScanner(), + new DepthFirstScanner(), + new ForkScanner() }; List heads = exec.getCurrentHeads(); @@ -133,7 +138,7 @@ public void testSimpleScan() throws Exception { } // Block Hopping tests - FlowScanner.LinearBlockHoppingScanner scanner = new FlowScanner.LinearBlockHoppingScanner(); + LinearBlockHoppingScanner scanner = new LinearBlockHoppingScanner(); Assert.assertFalse("BlockHopping scanner jumps over the flow when started at end", scanner.setup(heads, Collections.EMPTY_SET)); List collectedNodes = scanner.filteredNodes(Collections.singleton(exec.getNode("5")), null, 
(Predicate)Predicates.alwaysTrue()); Assert.assertEquals(exec.getNode("5"), collectedNodes.get(0)); @@ -234,23 +239,23 @@ public void testBasicScanWithBlock() throws Exception { FlowExecution exec = b.getExecution(); // Linear analysis - FlowScanner.LinearScanner linearScanner = new FlowScanner.LinearScanner(); + LinearScanner linearScanner = new LinearScanner(); Assert.assertEquals(3, linearScanner.filteredNodes(exec.getCurrentHeads(), null, matchEchoStep).size()); Assert.assertEquals(3, linearScanner.filteredNodes(exec.getNode("7"), matchEchoStep).size()); // Test blockhopping - FlowScanner.LinearBlockHoppingScanner linearBlockHoppingScanner = new FlowScanner.LinearBlockHoppingScanner(); + LinearBlockHoppingScanner linearBlockHoppingScanner = new LinearBlockHoppingScanner(); Assert.assertEquals(0, linearBlockHoppingScanner.filteredNodes(exec.getCurrentHeads(), null, matchEchoStep).size()); //Hopped Assert.assertEquals(1, linearBlockHoppingScanner.filteredNodes(exec.getNode("8"), matchEchoStep).size()); Assert.assertEquals(3, linearBlockHoppingScanner.filteredNodes(exec.getNode("7"), matchEchoStep).size()); // Prove we covered all - FlowScanner.DepthFirstScanner depthFirstScanner = new FlowScanner.DepthFirstScanner(); + DepthFirstScanner depthFirstScanner = new DepthFirstScanner(); Assert.assertEquals(3, depthFirstScanner.filteredNodes(exec.getCurrentHeads(), null, matchEchoStep).size()); Assert.assertEquals(3, depthFirstScanner.filteredNodes(exec.getNode("7"), matchEchoStep).size()); // Prove we covered all - FlowScanner.ForkScanner forkScanner = new FlowScanner.ForkScanner(); + ForkScanner forkScanner = new ForkScanner(); Assert.assertEquals(3, forkScanner.filteredNodes(exec.getCurrentHeads(), null, matchEchoStep).size()); Assert.assertEquals(3, forkScanner.filteredNodes(exec.getNode("7"), matchEchoStep).size()); } @@ -280,7 +285,7 @@ public void blockJumpTest() throws Exception { Collection heads = b.getExecution().getCurrentHeads(); FlowExecution exec = 
b.getExecution(); - FlowScanner.LinearBlockHoppingScanner hopper = new FlowScanner.LinearBlockHoppingScanner(); + LinearBlockHoppingScanner hopper = new LinearBlockHoppingScanner(); FlowNode headCandidate = exec.getNode("7"); Assert.assertEquals(exec.getNode("4"), hopper.jumpBlockScan(headCandidate, Collections.EMPTY_SET)); Assert.assertTrue("Setup should return true if we can iterate", hopper.setup(headCandidate, null)); @@ -339,16 +344,16 @@ public void testParallelScan() throws Exception { FlowExecution exec = b.getExecution(); Collection heads = b.getExecution().getCurrentHeads(); - AbstractFlowScanner scanner = new FlowScanner.LinearScanner(); + AbstractFlowScanner scanner = new LinearScanner(); Collection matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); Assert.assertTrue(matches.size() == 3 || matches.size() == 4); // Depending on ordering - scanner = new FlowScanner.DepthFirstScanner(); + scanner = new DepthFirstScanner(); matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); Assert.assertEquals(5, matches.size()); // Block hopping scanner - scanner = new FlowScanner.LinearBlockHoppingScanner(); + scanner = new LinearBlockHoppingScanner(); matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); Assert.assertEquals(0, matches.size()); @@ -356,7 +361,7 @@ public void testParallelScan() throws Exception { Assert.assertEquals(2, matches.size()); // We're going to test the ForkScanner in more depth since this is its natural use - scanner = new FlowScanner.ForkScanner(); + scanner = new ForkScanner(); matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); Assert.assertEquals(5, matches.size()); From 8e8ddfb5d6c8173d80f41865970caeb66bf82292 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 3 May 2016 17:03:39 -0400 Subject: [PATCH 025/104] Rename graphanalysis test package --- .../workflow/{graph => graphanalysis}/TestFlowScanner.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) rename 
src/test/java/org/jenkinsci/plugins/workflow/{graph => graphanalysis}/TestFlowScanner.java (99%) diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestFlowScanner.java similarity index 99% rename from src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java rename to src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestFlowScanner.java index 76c08984..12bee323 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graph/TestFlowScanner.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestFlowScanner.java @@ -22,13 +22,14 @@ * THE SOFTWARE. */ -package org.jenkinsci.plugins.workflow.graph; +package org.jenkinsci.plugins.workflow.graphanalysis; import com.google.common.base.Predicate; import com.google.common.base.Predicates; import org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition; import org.jenkinsci.plugins.workflow.cps.nodes.StepAtomNode; import org.jenkinsci.plugins.workflow.flow.FlowExecution; +import org.jenkinsci.plugins.workflow.graph.FlowNode; import org.jenkinsci.plugins.workflow.graphanalysis.DepthFirstScanner; import org.jenkinsci.plugins.workflow.graphanalysis.FlowNodeVisitor; import org.jenkinsci.plugins.workflow.graphanalysis.ForkScanner; From b90e87869ab5fee5be3ebc13f09274c02a803696 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 3 May 2016 17:10:13 -0400 Subject: [PATCH 026/104] Optimize the DepthFirstWalker by not tracking non-BlockStart nodes --- .../workflow/graphanalysis/DepthFirstScanner.java | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java index aa827827..bf7456ee 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java +++
b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java @@ -24,6 +24,7 @@ package org.jenkinsci.plugins.workflow.graphanalysis; +import org.jenkinsci.plugins.workflow.graph.BlockStartNode; import org.jenkinsci.plugins.workflow.graph.FlowNode; import javax.annotation.Nonnull; @@ -72,7 +73,9 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection List parents = current.getParents(); if (parents != null) { for (FlowNode f : parents) { - if (!blackList.contains(f) && !_visited.contains(f)) { + // Only ParallelStep nodes may be visited multiple times... but we can't just filter those + // because that's in workflow-cps plugin which depends on this one + if (!blackList.contains(f) && !(f instanceof BlockStartNode && _visited.contains(f))) { if (output == null ) { output = f; } else { @@ -86,7 +89,9 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection if (output == null && _queue.size() > 0) { output = _queue.pop(); } - _visited.add(output); // No-op if null + if (output instanceof BlockStartNode) { // See above, best step towards just tracking parallel starts + _visited.add(output); + } return output; } } From b44f40245e3a268b40b7b0c05bd8e6dcff725300 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Mon, 16 May 2016 23:38:27 -0400 Subject: [PATCH 027/104] Save the WIP for ForkScanner with multiple heads --- .../workflow/graphanalysis/ForkScanner.java | 251 +++++++++++++++++- .../workflow/graphanalysis/LinearScanner.java | 2 + .../graphanalysis/TestFlowScanner.java | 48 ++++ 3 files changed, 292 insertions(+), 9 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index 59bb19ec..8d40412f 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -28,18 +28,34 @@ 
import org.jenkinsci.plugins.workflow.graph.BlockStartNode; import org.jenkinsci.plugins.workflow.graph.FlowNode; +import javax.annotation.CheckForNull; import javax.annotation.Nonnull; import java.util.ArrayDeque; +import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; import java.util.List; +import java.util.ListIterator; +import java.util.Objects; +import java.util.Set; /** - * Scanner that will scan down forks when we hit parallel blocks. - * Think of it as the opposite of {@link DepthFirstScanner}: - * - We visit every node exactly once, but walk through all parallel forks before resuming the main flow + * Scanner that will scan down all forks when we hit parallel blocks before continuing, but generally runs in linear order + * Think of it as the opposite of {@link DepthFirstScanner}. + * + * This is a fairly efficient way to visit all FlowNodes, and provides three useful guarantees: + * - Every FlowNode is visited, and visited EXACTLY ONCE (not true for LinearScanner) + * - All parallel branches are visited before we move past the parallel block (not true for DepthFirstScanner) + * - For EVERY block, the BlockEndNode is visited before the BlockStartNode (not true for DepthFirstScanner) + * + * The big advantages of this approach: + * - Blocks are visited in the order they end (no backtracking) - helps with working a block at a time + * - Points are visited in linear order within a block (easy to use for analysis) + * - Minimal state information needed + * - Branch information is available for use here * - * This is near-optimal in many cases, since it keeps minimal state information and explores parallel blocks first - * It is also very easy to make it branch/block-aware, since we have all the fork information at all times. 
* @author Sam Van Oort */ public class ForkScanner extends AbstractFlowScanner { @@ -69,12 +85,215 @@ protected void reset() { _next = null; } + protected static abstract class FlowPiece { + long startTime; + long endTime; + long pauseDuration; + String statusCode; + + // Bounds for a block + String startId; + String endId; + } + + protected static class AtomicStep extends FlowPiece { + + } + + protected static class FlowSegment extends FlowPiece { + ArrayList visited = new ArrayList(); + FlowPiece before; + FlowPiece after; + + /** + * We have discovered a forking node intersecting our FlowSegment in the middle + * Now we need to split the flow + * @param nodeMapping Mapping of BlockStartNodes to flowpieces (forks or segments) + * @param forkPoint Node where the flows intersect + * @param forkBranch Flow piece that is joining this + */ + public void split(@Nonnull HashMap nodeMapping, @Nonnull BlockStartNode forkPoint, @Nonnull FlowPiece forkBranch) { + int index = visited.indexOf(forkPoint); + if (index < 0) { + throw new IllegalStateException("Tried to split a segment where the node doesn't exist in this segment"); + } + + // Execute the split: create a new fork at the fork point, and shuffle the part of the flow after it + // to a new segment and add that to the fork + Fork newFork = new Fork(forkPoint); + FlowSegment newSegment = new FlowSegment(); + newSegment.after = this.after; + newSegment.before = newFork; + if (visited.size() > index+1) { + newSegment.visited.addAll(index+1, visited); + } + newFork.before = this; + newFork.following.add(forkBranch); + newFork.following.add(newSegment); + this.after = newFork; + + // Remove the nodes after the split, and remap the fork points + this.visited.subList(index,visited.size()-1).clear(); + for (FlowNode n : newSegment.visited) { + nodeMapping.put(n, newSegment); + } + nodeMapping.put(forkPoint, newFork); + } + + public void add(FlowNode f) { + this.visited.add(f); + } + } + + protected static class Fork extends 
FlowPiece { + FlowPiece before; + BlockStartNode forkNode; + List following = new ArrayList(); + + public Fork(BlockStartNode forkNode) { + this.forkNode = forkNode; + } + } + + /** References from a branch to parent, used for creating a sorted hierarchy */ + protected static class ForkRef implements Comparable { + int depth; + FlowNode self; + FlowNode parent; + + /** Sort by depth then by parents, other than that irrelevant */ + @Override + public int compareTo(ForkRef o) { + if (o == null) { + return -1; + } + if (this.depth != o.depth) { + return (this.depth - o.depth); // Deepest first, sorting in reverse order + } + return (this.parent.getId().compareTo(o.parent.getId())); + } + + public boolean equals(Object o) { + if (o == this) { + return true; + } else if (o == null || !(o instanceof ForkRef)) { + return false; + } + return o != null && o instanceof ForkRef && ((ForkRef)o).depth == this.depth && + ((ForkRef)o).self == this.self && ((ForkRef)o).parent == this.parent; + } + + protected ForkRef(int depth, FlowNode self, FlowNode parent) { + this.depth = depth; + this.self = self; + this.parent = parent; + } + } + + /** Endpoint for a fork */ + protected static class ForkHead { + protected FlowNode head; + protected int branchCount = 0; + } + + /** Accumulate all the branch references here, recursively */ + private void addForkRefs(List refs, Fork myFork, int currentDepth) { + List pieces = myFork.following; + for (FlowPiece f : pieces) { + FlowSegment fs = (FlowSegment)f; + refs.add(new ForkRef(currentDepth+1, fs.visited.get(fs.visited.size()-1), myFork.forkNode)); + if (fs.after != null && fs.after instanceof Fork) { + addForkRefs(refs, (Fork)fs.after, currentDepth+1); + } + } + } + + /*private void addToRefs(List refList) { + Collections.sort(refList); + for (ForkRef fr : refList) { + // Add appropriate entries to queue, etc + } + }*/ + + /** + * Constructs the tree mapping each flowNode to its nearest common ancestor + * @param heads + */ + void 
leastCommonAncestor(@Nonnull Set heads) { + HashMap branches = new HashMap(); + ArrayList> iterators = new ArrayList>(); + ArrayList liveHeads = new ArrayList(); + + for (FlowNode f : heads) { + iterators.add(FlowScanningUtils.filterableEnclosingBlocks(f)); + FlowSegment b = new FlowSegment(); + b.add(f); + liveHeads.add(b); + branches.put(f, b); + } + + // Walk through until everything has merged to one ancestor + while (iterators.size() > 1) { + ListIterator> itIterator = iterators.listIterator(); + ListIterator pieceIterator = liveHeads.listIterator(); + + while(itIterator.hasNext()) { + Filterator blockStarts = itIterator.next(); + FlowSegment myPiece = pieceIterator.next(); + + // Welp we hit the end of a branch + if (!blockStarts.hasNext()) { + pieceIterator.remove(); + itIterator.remove(); + continue; + } + + FlowNode nextHead = blockStarts.next(); + FlowPiece existingBranch = branches.get(nextHead); + if (existingBranch != null) { // + // Found a case where they converge, replace with a convergent branch + if (existingBranch instanceof Fork) { + Fork f = (Fork)existingBranch; + f.following.add(myPiece); + } else { + ((FlowSegment)existingBranch).split(branches, (BlockStartNode)nextHead, myPiece); + } + itIterator.remove(); + pieceIterator.remove(); + } else { + myPiece.add(nextHead); + branches.put(nextHead, myPiece); + } + } + } + + // Add the ancestry to the forks, note that we alternate fork-flowsegment-fork + ArrayList refs = new ArrayList(); + ArrayDeque children = new ArrayDeque(); + children.add((Fork)liveHeads.get(0).after); + while (children.size() > 0) { + Fork child = children.pop(); + if (child.following != null && child.following.size() > 0) { + // add the fork child and its forks + } + + } + Collections.sort(refs); + // Now we add start points + + + // FIRST: we visit all nodes on the same level, with the same parent + // Add refs to an + // Then we visit their parents + + + } + @Override protected void setHeads(@Nonnull Collection heads) { if 
(heads.size() > 1) { - throw new IllegalArgumentException("ForkedFlowScanner can't handle multiple head nodes yet"); - // TODO We need to implement this using filterableEnclosingBlocks - // and add nodes to with the start of their parallel branches + //throw new IllegalArgumentException("ForkedFlowScanner can't handle multiple head nodes yet"); + leastCommonAncestor(new HashSet(heads)); } _current = null; _queue.addAll(heads); @@ -82,6 +301,19 @@ protected void setHeads(@Nonnull Collection heads) { _next = _current; } + public int getParallelDepth() { + return parallelDepth; + } + + /** + * Return the node that begins the current parallel head + * @return + */ + @CheckForNull + public FlowNode getCurrentParallelStart() { + return currentParallelStart; + } + /** * Invoked when we start entering a parallel block (walking from head of the flow, so we see the block end first) * @param endNode Node where parents merge (final end node for the parallel block) @@ -94,7 +326,8 @@ protected FlowNode hitParallelEnd(BlockEndNode endNode, List parents, FlowNode output = null; for (FlowNode f : parents) { if (!blackList.contains(f)) { - if (branchesAdded == 0) { // We use references because it is more efficient + // If this is the first fork, we'll walk up it, and then queue up the others + if (branchesAdded == 0) { currentParallelStart = start; output = f; } else { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java index 90e254af..1bf6c6e5 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java @@ -34,6 +34,8 @@ /** * Scans through a single ancestry, does not cover parallel branches * Use case: we don't care about parallel branches + * + * This is the fastest way to walk a flow, because you only care about a single node * @author Sam Van Oort 
*/ public class LinearScanner extends AbstractFlowScanner { diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestFlowScanner.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestFlowScanner.java index 12bee323..068e6c70 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestFlowScanner.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestFlowScanner.java @@ -366,6 +366,12 @@ public void testParallelScan() throws Exception { matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); Assert.assertEquals(5, matches.size()); +/* ArrayList forkedHeads = new ArrayList(); + forkedHeads.add(exec.getNode("9")); + forkedHeads.add(exec.getNode("11")); + matches = scanner.filteredNodes(forkedHeads, null, MATCH_ECHO_STEP); + Assert.assertEquals(5, matches.size());*/ + // Start in one branch, test the forkscanning Assert.assertEquals(3, scanner.filteredNodes(exec.getNode("12"), MATCH_ECHO_STEP).size()); Assert.assertEquals(2, scanner.filteredNodes(exec.getNode("9"), MATCH_ECHO_STEP).size()); @@ -377,4 +383,46 @@ public void testParallelScan() throws Exception { blackList = Arrays.asList(exec.getNode("6"), exec.getNode("10")); Assert.assertEquals(3, scanner.filteredNodes(heads, blackList, MATCH_ECHO_STEP).size()); } + + @Test + public void testNestedParallelScan() throws Exception { + WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "Convoluted"); + job.setDefinition(new CpsFlowDefinition( + "echo 'first'\n" + + "def steps = [:]\n" + + "steps['1'] = {\n" + + " echo 'do 1 stuff'\n" + + "}\n" + + "steps['2'] = {\n" + + " echo '2a'\n" + + " def nested = [:]\n" + + " nested['2-1'] = {\n" + + " echo 'do 2-1'\n" + + " } \n" + + " nested['2-2'] = {\n" + + " sleep 1\n" + + " echo '2 section 2'\n" + + " }\n" + + " echo '2b'\n" + + " parallel nested\n" + + "}\n" + + "parallel steps\n" + + "echo 'final'" + )); + + WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); + 
FlowExecution exec = b.getExecution(); + Collection heads = b.getExecution().getCurrentHeads(); + + // Basic test of DepthFirstScanner + AbstractFlowScanner scanner = new DepthFirstScanner(); + Collection matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); + Assert.assertEquals(7, matches.size()); + + + // We're going to test the ForkScanner in more depth since this is its natural use + scanner = new ForkScanner(); + matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); + Assert.assertEquals(7, matches.size()); + } } \ No newline at end of file From 366e0805fc27d7f39f315d05db01761a963f2b45 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 17 May 2016 11:55:50 -0400 Subject: [PATCH 028/104] Add explanation header to AbstractFlowScanner --- .../workflow/graphanalysis/AbstractFlowScanner.java | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java index 2f84a5d6..40895345 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java @@ -46,13 +46,18 @@ * Scans/analysis of graphs is implemented via internal iteration to allow reusing algorithm bodies * However internal iteration has access to additional information * + * Provides 4 sets of common APIs to use, in decreasing expressiveness and increasing genericness: + * - findFirst - find first node match a predicate + * - filteredNodes - return a collection of nodes filtered by a predicate, between heads(inclusive) and blackList (exclusive) + * - visitor - call a visitor on each node we encounter + * - Iterator/filterator based: FlowNode-by-FlowNode walking, after setup with head/blacklist + * + * All operations can start with one or more "head" FlowNodes, and walk back from there, 
stopping when we hit blackList nodes + * * @author Sam Van Oort */ public abstract class AbstractFlowScanner implements Iterable , Filterator { - // State variables, not all need be used - protected ArrayDeque _queue; - protected FlowNode _current; protected FlowNode _next; From 233107f3bd29bf4232bf64ee87ebb8f53045e980 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 17 May 2016 14:52:43 -0400 Subject: [PATCH 029/104] Clean up from initial review: add and fix javadocs, refactor some field names, make visitAll method take a blackList and add helper for it --- .../graphanalysis/AbstractFlowScanner.java | 145 +++++++++++++----- .../graphanalysis/DepthFirstScanner.java | 40 +++-- .../graphanalysis/FlowScanningUtils.java | 12 +- .../LinearBlockHoppingScanner.java | 28 ++-- .../workflow/graphanalysis/LinearScanner.java | 20 ++- .../graphanalysis/TestFlowScanner.java | 2 +- 6 files changed, 168 insertions(+), 79 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java index 40895345..c542e48a 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java @@ -30,7 +30,6 @@ import javax.annotation.CheckForNull; import javax.annotation.Nonnull; -import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -41,30 +40,54 @@ import java.util.Set; /** - * Base class for flow scanners, which offers basic methods and stubs for algorithms - * Scanners store state internally, and are not thread-safe but are reusable - * Scans/analysis of graphs is implemented via internal iteration to allow reusing algorithm bodies - * However internal iteration has access to additional information + * Core APIs and base logic for FlowScanners that extract information from a 
pipeline execution. + * + * These iterate through the directed acyclic graph (DAG) or "flow graph" of {@link FlowNode}s produced when a pipeline runs. + * - * Provides 4 sets of common APIs to use, in decreasing expressiveness and increasing genericness: - * - findFirst - find first node match a predicate - * - filteredNodes - return a collection of nodes filtered by a predicate, between heads(inclusive) and blackList (exclusive) - * - visitor - call a visitor on each node we encounter - * - Iterator/filterator based: FlowNode-by-FlowNode walking, after setup with head/blacklist + * This provides 6 base APIs to use, in decreasing expressiveness and increasing genericity: + * - {@link #findFirstMatch(Collection, Collection, Predicate)}: find the first FlowNode matching predicate condition. + * - {@link #filteredNodes(Collection, Collection, Predicate)}: return the collection of FlowNodes matching the predicate. + * - {@link #visitAll(Collection, FlowNodeVisitor)}: given a {@link FlowNodeVisitor}, invoke {@link FlowNodeVisitor#visit(FlowNode)} on each node and halt when it returns false. + * - Iterator: Each FlowScanner can be used as an Iterator for FlowNode-by-FlowNode walking, + * after you invoke {@link #setup(Collection, Collection)} to initialize it for iteration. + * - {@link Filterator}: If initialized as an Iterator, each FlowScanner can provide a filtered view from the current point in time. + * - Iterable: for syntactic sugar, FlowScanners implement Iterable to allow use in for-each loops once initialized. + * + * All APIs visit the parent nodes, walking backward from heads(inclusive) until they hit {@link #blackList} nodes (exclusive) or reach the end of the DAG. + * If blackList nodes are an empty collection or null, APIs will walk to the beginning of the FlowGraph. 
+ * Multiple blackList nodes are helpful for putting separate bounds on walking different parallel branches. + * + * Key Points: + * - There are many helper methods offering syntactic sugar for the above APIs in common use cases (simpler method signatures). + * - Each implementation provides its own iteration order (described in its javadoc comments). + * - Implementations may visit some or all points in the DAG, this should be called out in the class's javadoc comments + * - FlowScanners are NOT thread safe, for performance reasons and because it is too hard to guarantee. + * - Many fields and methods are protected: this is intentional to allow building upon the implementations for more complex analyses. + * - Each FlowScanner stores state internally for several reasons: + * - This state can be used to construct more advanced analyses. + * - FlowScanners can be reinitialized and reused repeatedly, avoiding the overheads of creating scanners repeatedly. + * + * Suggested uses: + * - Implement a {@link FlowNodeVisitor} that collects metrics from each FlowNode visited, and call visitAll to extract the data. + * - Find all flownodes of a given type (ex: stages), using {@link #filteredNodes(Collection, Collection, Predicate)} + * - Find the first node with an Error before a specific node + * - Scan through all nodes *just* within a block + * - Use the {@link org.jenkinsci.plugins.workflow.graph.BlockEndNode} as the head + * - Use the {@link org.jenkinsci.plugins.workflow.graph.BlockStartNode} as its blacklist with {@link Collections#singleton(Object)} + * + * TODO: come back and prettify this for HTML-style list formatting. 
* * @author Sam Van Oort */ public abstract class AbstractFlowScanner implements Iterable , Filterator { - protected FlowNode _current; + protected FlowNode current; - protected FlowNode _next; + protected FlowNode next; - protected Collection _blackList = Collections.EMPTY_SET; + protected Collection blackList = Collections.EMPTY_SET; - /** Convert stop nodes to a collection that can efficiently be checked for membership, handling null if needed */ + /** Helper: convert stop nodes to a collection that can efficiently be checked for membership, handling null if needed */ @Nonnull protected Collection convertToFastCheckable(@CheckForNull Collection nodeCollection) { if (nodeCollection == null || nodeCollection.size()==0) { @@ -97,17 +120,13 @@ public boolean setup(@CheckForNull Collection heads, @CheckForNull Col } reset(); - _blackList = fastEndNodes; + blackList = fastEndNodes; setHeads(filteredHeads); return true; } /** - * Set up for iteration/analysis on a graph of nodes, initializing the internal state - * @param head The head FlowNode to start walking back from - * @param blackList Nodes that we cannot visit or walk past (useful to limit scanning to only nodes after a specific point) - * null or empty collection means none - * @return True if we can have nodes to work with, otherwise false + * Helper: version of {@link #setup(Collection, Collection)} where we don't have any nodes to blacklist, and have just a single head */ public boolean setup(@CheckForNull FlowNode head, @CheckForNull Collection blackList) { if (head == null) { @@ -116,6 +135,9 @@ public boolean setup(@CheckForNull FlowNode head, @CheckForNull Collection filteredHeads); /** @@ -140,24 +172,18 @@ public boolean setup(@CheckForNull FlowNode head) { @Override public boolean hasNext() { - return _next != null; + return next != null; } @Override public FlowNode next() { - if (_next == null) { + if (next == null) { throw new NoSuchElementException(); } - // For computing timings and changes, it 
may be helpful to keep the previous result - // by creating a variable _last and storing _current to it. - -// System.out.println("Current iterator val: " + ((_current == null) ? "null" : _current.getId())); -// System.out.println("Next iterator val: " + ((_next == null) ? "null" : _next.getId())); - _current = _next; - _next = next(_current, _blackList); -// System.out.println("New next val: " + ((_next == null) ? "null" : _next.getId())); - return _current; + current = next; + next = next(current, blackList); + return current; } @Override @@ -170,11 +196,23 @@ public Iterator iterator() { return this; } + /** + * Expose a filtered view of this FlowScanner's output. + * @param filterCondition Filterator only returns {@link FlowNode}s matching this predicate. + * @return A {@link Filterator} against this FlowScanner, which can be filtered in additional ways. + */ + @Override public Filterator filter(Predicate filterCondition) { return new FilteratorImpl(this, filterCondition); } - // Basic algo impl + /** + * Find the first FlowNode within the iteration order matching a given condition + * @param heads Head nodes to start walking from + * @param endNodes + * @param matchCondition Predicate to match when we've successfully found a given node type + * @return First matching node, or null if no matches found + */ public FlowNode findFirstMatch(@CheckForNull Collection heads, @CheckForNull Collection endNodes, Predicate matchCondition) { @@ -192,16 +230,19 @@ public FlowNode findFirstMatch(@CheckForNull Collection heads, // Polymorphic methods for syntactic sugar + /** Syntactic sugar for {@link #findFirstMatch(Collection, Collection, Predicate)} where there is no blackList */ @CheckForNull public FlowNode findFirstMatch(@CheckForNull Collection heads, @Nonnull Predicate matchPredicate) { return this.findFirstMatch(heads, null, matchPredicate); } + /** Syntactic sugar for {@link #findFirstMatch(Collection, Collection, Predicate)} where there is a single head and no 
blackList */ @CheckForNull public FlowNode findFirstMatch(@CheckForNull FlowNode head, @Nonnull Predicate matchPredicate) { return this.findFirstMatch(Collections.singleton(head), null, matchPredicate); } + /** Syntactic sugar for {@link #findFirstMatch(Collection, Collection, Predicate)} using {@link FlowExecution#getCurrentHeads()} to get heads and no blackList */ @CheckForNull public FlowNode findFirstMatch(@CheckForNull FlowExecution exec, @Nonnull Predicate matchPredicate) { if (exec != null && exec.getCurrentHeads() != null) { @@ -210,12 +251,18 @@ public FlowNode findFirstMatch(@CheckForNull FlowExecution exec, @Nonnull Predic return null; } - // Basic algo impl + /** + * Return a filtered list of {@link FlowNode}s matching a condition, in the order encountered. + * @param heads Nodes to start iterating backward from by visiting their parents + * @param blackList Nodes we may not visit or walk beyond. + * @param matchCondition Predicate that must be met for nodes to be included in output. + * @return List of flownodes matching the predicate. 
+ */ @Nonnull public List filteredNodes(@CheckForNull Collection heads, - @CheckForNull Collection endNodes, + @CheckForNull Collection blackList, Predicate matchCondition) { - if (!setup(heads, endNodes)) { + if (!setup(heads, blackList)) { return Collections.EMPTY_LIST; } @@ -228,21 +275,30 @@ public List filteredNodes(@CheckForNull Collection heads, return nodes; } + /** Syntactic sugar for {@link #filteredNodes(Collection, Collection, Predicate)} with no blackList nodes */ @Nonnull public List filteredNodes(@CheckForNull Collection heads, @Nonnull Predicate matchPredicate) { return this.filteredNodes(heads, null, matchPredicate); } + /** Syntactic sugar for {@link #filteredNodes(Collection, Collection, Predicate)} with a single head and no blackList nodes */ @Nonnull public List filteredNodes(@CheckForNull FlowNode head, @Nonnull Predicate matchPredicate) { return this.filteredNodes(Collections.singleton(head), null, matchPredicate); } - /** Used for extracting metrics from the flow graph */ - @Nonnull - public void visitAll(@CheckForNull Collection heads, FlowNodeVisitor visitor) { - if (!setup(heads, Collections.EMPTY_SET)) { + /** + * Given a {@link FlowNodeVisitor}, invoke {@link FlowNodeVisitor#visit(FlowNode)} on each node and halt early if it returns false. + * + * Useful if you wish to collect some information from every node in the FlowGraph. + * To do that, accumulate internal state in the visitor, and invoke a getter when complete. + * @param heads Nodes to start walking the DAG backwards from. + * @param blackList Nodes we can't visit or pass beyond. + * @param visitor Visitor that will see each FlowNode encountered. 
+ */ + public void visitAll(@CheckForNull Collection heads, @CheckForNull Collection blackList, FlowNodeVisitor visitor) { + if (!setup(heads, blackList)) { return; } for (FlowNode f : this) { @@ -252,4 +308,9 @@ public void visitAll(@CheckForNull Collection heads, FlowNodeVisitor v } } } + + /** Syntactic sugar for {@link #visitAll(Collection, FlowNodeVisitor)} where we don't blacklist any nodes */ + public void visitAll(@CheckForNull Collection heads, FlowNodeVisitor visitor) { + visitAll(heads, null, visitor); + } } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java index bf7456ee..966731c6 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java @@ -34,22 +34,28 @@ import java.util.Iterator; import java.util.List; -/** Does a simple and efficient depth-first search: - * - This will visit each node exactly once, and walks through the first ancestry before revisiting parallel branches +/** Does a simple and somewhat efficient depth-first search of all FlowNodes in the DAG. + * + * Iteration order: depth-first search, revisiting parallel branches once done. + * With parallel branches, parents are visited in the order encountered. + * + * The behavior is analogous to {@link org.jenkinsci.plugins.workflow.graph.FlowGraphWalker} but faster. 
* @author Sam Van Oort */ public class DepthFirstScanner extends AbstractFlowScanner { - protected HashSet _visited = new HashSet(); + protected ArrayDeque queue; + + protected HashSet visited = new HashSet(); protected void reset() { - if (this._queue == null) { - this._queue = new ArrayDeque(); + if (this.queue == null) { + this.queue = new ArrayDeque(); } else { - this._queue.clear(); + this.queue.clear(); } - this._visited.clear(); - this._current = null; + this.visited.clear(); + this.current = null; } @Override @@ -57,11 +63,11 @@ protected void setHeads(@Nonnull Collection heads) { Iterator it = heads.iterator(); if (it.hasNext()) { FlowNode f = it.next(); - _current = f; - _next = f; + current = f; + next = f; } while (it.hasNext()) { - _queue.add(it.next()); + queue.add(it.next()); } } @@ -74,23 +80,23 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection if (parents != null) { for (FlowNode f : parents) { // Only ParallelStep nodes may be visited multiple times... but we can't just filter those - // because that's in workflow-cps plugin which depends on this one - if (!blackList.contains(f) && !(f instanceof BlockStartNode && _visited.contains(f))) { + // because that's in workflow-cps plugin which depends on this one. 
+ if (!blackList.contains(f) && !(f instanceof BlockStartNode && visited.contains(f))) { if (output == null ) { output = f; } else { - _queue.push(f); + queue.push(f); } } } } } - if (output == null && _queue.size() > 0) { - output = _queue.pop(); + if (output == null && queue.size() > 0) { + output = queue.pop(); } if (output instanceof BlockStartNode) { // See above, best step towards just tracking parallel starts - _visited.add(output); + visited.add(output); } return output; } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java index d1053479..a8ad9395 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java @@ -64,7 +64,7 @@ public boolean apply(FlowNode input) { }; } - // Default predicates + // Default predicates, which may be used for common conditions public static final Predicate MATCH_HAS_LABEL = nodeHasActionPredicate(LabelAction.class); public static final Predicate MATCH_IS_STAGE = nodeHasActionPredicate(StageAction.class); public static final Predicate MATCH_HAS_WORKSPACE = nodeHasActionPredicate(WorkspaceAction.class); @@ -72,7 +72,15 @@ public boolean apply(FlowNode input) { public static final Predicate MATCH_HAS_LOG = nodeHasActionPredicate(LogAction.class); public static final Predicate MATCH_BLOCK_START = (Predicate)Predicates.instanceOf(BlockStartNode.class); - public static Filterator filterableEnclosingBlocks(FlowNode f) { + /** + * Returns all {@link BlockStartNode}s enclosing the given FlowNode, starting from the inside out. + * This is useful if we want to obtain information about its scope, such as the workspace, parallel branch, or label. + * Warning: while this is efficient for one node, batch operations are far more efficient when handling many nodes. 
+ * @param f {@link FlowNode} to start from. + * @return Iterator that returns all enclosing BlockStartNodes from the inside out. + */ + @Nonnull + public static Filterator filterableEnclosingBlocks(@Nonnull FlowNode f) { LinearBlockHoppingScanner scanner = new LinearBlockHoppingScanner(); scanner.setup(f); return scanner.filter(MATCH_BLOCK_START); diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java index 5f4754f9..7676339e 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java @@ -25,6 +25,7 @@ package org.jenkinsci.plugins.workflow.graphanalysis; import org.jenkinsci.plugins.workflow.graph.BlockEndNode; +import org.jenkinsci.plugins.workflow.graph.BlockStartNode; import org.jenkinsci.plugins.workflow.graph.FlowNode; import javax.annotation.CheckForNull; @@ -33,12 +34,21 @@ import java.util.List; /** - * LinearScanner that jumps over nested blocks - * Use case: finding information about enclosing blocks or preceding nodes - * - Ex: finding out the executor workspace used to run a flownode - * Caveats: - * - If you start on the last node of a completed flow, it will jump straight to start (by design) - * - Will only consider the first branch in a parallel case + * Extension of {@link LinearScanner} that skips nested blocks at the current level. + * ONLY use this with nodes inside the flow graph, never the last node of a completed flow (it will jump over the whole flow). + * + * This is useful where you only care about {@link FlowNode}s that precede this one or are part of an enclosing scope (within a Block). + * + * Specifically: + * - Where a {@link BlockEndNode} is encountered, the scanner will jump to the {@link BlockStartNode} and go to its first parent. 
+ * - The only case where you visit branches of a parallel block is if you begin inside it. + * + * Specific use cases: + * - Finding out the executor workspace used to run a FlowNode + * - Finding the start of the parallel block enclosing the current node + * - Locating the label applying to a given FlowNode (if any) + * + * TODO Format me into a tidy HTML list * @author Sam Van Oort */ public class LinearBlockHoppingScanner extends LinearScanner { @@ -46,14 +56,14 @@ public class LinearBlockHoppingScanner extends LinearScanner { @Override public boolean setup(@CheckForNull Collection heads, @CheckForNull Collection blackList) { boolean possiblyStartable = super.setup(heads, blackList); - return possiblyStartable && _current != null; // In case we start at an end block + return possiblyStartable && current != null; // In case we start at an end block } @Override protected void setHeads(@Nonnull Collection heads) { if (heads.size() > 0) { - this._current = jumpBlockScan(heads.iterator().next(), _blackList); - this._next = this._current; + this.current = jumpBlockScan(heads.iterator().next(), blackList); + this.next = this.current; } } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java index 1bf6c6e5..c92c3381 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java @@ -32,26 +32,30 @@ import java.util.List; /** - * Scans through a single ancestry, does not cover parallel branches - * Use case: we don't care about parallel branches + * Scans through the flow graph in strictly linear fashion, visiting only the first branch in parallel blocks. 
* - * This is the fastest way to walk a flow, because you only care about a single node + * Iteration order: depth-ONLY, meaning we walk through parents and only follow the first parent of each {@link FlowNode} + * This means that where there are parallel branches, we will only visit a partial set of {@link FlowNode}s in the directed acyclic graph. + * + * Use case: we don't care about parallel branches or know they don't exist, we just want to walk through the top-level blocks. + * + * This is the fastest & simplest way to walk a flow, because you only care about a single node at a time. * @author Sam Van Oort */ public class LinearScanner extends AbstractFlowScanner { @Override protected void reset() { - this._current = null; - this._next = null; - this._blackList = Collections.EMPTY_SET; + this.current = null; + this.next = null; + this.blackList = Collections.EMPTY_SET; } @Override protected void setHeads(@Nonnull Collection heads) { if (heads.size() > 0) { - this._current = heads.iterator().next(); - this._next = this._current; + this.current = heads.iterator().next(); + this.next = this.current; } } diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestFlowScanner.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestFlowScanner.java index 068e6c70..cb30cce7 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestFlowScanner.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestFlowScanner.java @@ -366,7 +366,7 @@ public void testParallelScan() throws Exception { matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); Assert.assertEquals(5, matches.size()); -/* ArrayList forkedHeads = new ArrayList(); + /*ArrayList forkedHeads = new ArrayList(); forkedHeads.add(exec.getNode("9")); forkedHeads.add(exec.getNode("11")); matches = scanner.filteredNodes(forkedHeads, null, MATCH_ECHO_STEP); From 2bf282912ad12c97f05a9d74472e875525b478f1 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: 
Tue, 17 May 2016 15:39:11 -0400 Subject: [PATCH 030/104] Small changes from review, and @NonNull annotations on FlowNode --- pom.xml | 2 +- .../plugins/workflow/graph/FlowNode.java | 3 +++ .../graphanalysis/DepthFirstScanner.java | 23 ++++++++----------- .../workflow/graphanalysis/Filterator.java | 2 +- .../graphanalysis/FilteratorImpl.java | 8 +++---- .../graphanalysis/FlowScanningUtils.java | 8 ++++--- ...tFlowScanner.java => FlowScannerTest.java} | 2 +- 7 files changed, 25 insertions(+), 23 deletions(-) rename src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/{TestFlowScanner.java => FlowScannerTest.java} (99%) diff --git a/pom.xml b/pom.xml index 41db0596..29ba1666 100644 --- a/pom.xml +++ b/pom.xml @@ -79,7 +79,7 @@ ${project.groupId} workflow-cps - 2.2-SNAPSHOT + 2.2 test diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowNode.java b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowNode.java index fa49d8ca..f9942ef9 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowNode.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graph/FlowNode.java @@ -114,6 +114,7 @@ public final boolean isRunning() { /** * Returns a read-only view of parents. 
*/ + @Nonnull public List getParents() { if (parents == null) { parents = loadParents(parentIds); @@ -121,6 +122,7 @@ public List getParents() { return parents; } + @Nonnull private List loadParents(List parentIds) { List _parents = new ArrayList(parentIds.size()); for (String parentId : parentIds) { @@ -135,6 +137,7 @@ private List loadParents(List parentIds) { @Restricted(DoNotUse.class) @Exported(name="parents") + @Nonnull public List getParentIds() { List ids = new ArrayList(2); for (FlowNode parent : getParents()) { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java index 966731c6..ee24aa1e 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java @@ -74,20 +74,17 @@ protected void setHeads(@Nonnull Collection heads) { @Override protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection blackList) { FlowNode output = null; + // Walk through parents of current node - if (current != null) { - List parents = current.getParents(); - if (parents != null) { - for (FlowNode f : parents) { - // Only ParallelStep nodes may be visited multiple times... but we can't just filter those - // because that's in workflow-cps plugin which depends on this one. - if (!blackList.contains(f) && !(f instanceof BlockStartNode && visited.contains(f))) { - if (output == null ) { - output = f; - } else { - queue.push(f); - } - } + List parents = current.getParents(); // Can't be null + for (FlowNode f : parents) { + // Only ParallelStep nodes may be visited multiple times... but we can't just filter those + // because that's in workflow-cps plugin which depends on this one. 
+ if (!blackList.contains(f) && !(f instanceof BlockStartNode && visited.contains(f))) { + if (output == null ) { + output = f; + } else { + queue.push(f); } } } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java index b7c766f4..24c57be4 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java @@ -33,7 +33,7 @@ * @author Sam Van Oort */ public interface Filterator extends Iterator { - /** Returns a filtered view of an iterable */ + /** Returns a filtered view of the iterator */ @Nonnull public Filterator filter(@Nonnull Predicate matchCondition); } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java index 7ab78dee..8bc9beec 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java @@ -33,10 +33,10 @@ * @author Sam Van Oort */ public class FilteratorImpl implements Filterator { - boolean hasNext = false; - T nextVal; - Iterator wrapped; - Predicate matchCondition; + private boolean hasNext = false; + private T nextVal; + private Iterator wrapped; + private Predicate matchCondition; public FilteratorImpl filter(Predicate matchCondition) { return new FilteratorImpl(this, matchCondition); diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java index a8ad9395..e2ae479d 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java @@ -46,16 +46,18 @@ * Library of common 
functionality when analyzing/walking flow graphs * @author Sam Van Oort */ -public class FlowScanningUtils { +public final class FlowScanningUtils { + + /** Prevent instantiation */ + private FlowScanningUtils() {} /** * Create a predicate that will match on all FlowNodes having a specific action present * @param actionClass Action class to look for - * @param Action type * @return Predicate that will match when FlowNode has the action given */ @Nonnull - public static Predicate nodeHasActionPredicate(@Nonnull final Class actionClass) { + public static Predicate nodeHasActionPredicate(@Nonnull final Class actionClass) { return new Predicate() { @Override public boolean apply(FlowNode input) { diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestFlowScanner.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java similarity index 99% rename from src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestFlowScanner.java rename to src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java index cb30cce7..1889e315 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestFlowScanner.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java @@ -53,7 +53,7 @@ import java.util.Collections; import java.util.List; -public class TestFlowScanner { +public class FlowScannerTest { @ClassRule public static BuildWatcher buildWatcher = new BuildWatcher(); From 733ab4a5d7d449146d06ff4375b0bbb56ffb1272 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 17 May 2016 18:45:44 -0400 Subject: [PATCH 031/104] More changes from review --- .../graphanalysis/AbstractFlowScanner.java | 14 ++++++++++---- .../workflow/graphanalysis/DepthFirstScanner.java | 7 +++++-- .../plugins/workflow/graphanalysis/Filterator.java | 8 ++++++-- .../workflow/graphanalysis/FilteratorImpl.java | 2 +- 4 files changed, 22 insertions(+), 9 deletions(-) diff --git 
a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java index c542e48a..f07e4de2 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java @@ -25,6 +25,7 @@ package org.jenkinsci.plugins.workflow.graphanalysis; import com.google.common.base.Predicate; +import com.google.common.collect.ImmutableSet; import org.jenkinsci.plugins.workflow.flow.FlowExecution; import org.jenkinsci.plugins.workflow.graph.FlowNode; @@ -35,6 +36,8 @@ import java.util.Collections; import java.util.HashSet; import java.util.Iterator; +import java.util.LinkedHashSet; +import java.util.LinkedList; import java.util.List; import java.util.NoSuchElementException; import java.util.Set; @@ -112,7 +115,7 @@ public boolean setup(@CheckForNull Collection heads, @CheckForNull Col return false; } Collection fastEndNodes = convertToFastCheckable(blackList); - HashSet filteredHeads = new HashSet(heads); + LinkedHashSet filteredHeads = new LinkedHashSet(heads); filteredHeads.removeAll(fastEndNodes); if (filteredHeads.size() == 0) { @@ -187,11 +190,12 @@ public FlowNode next() { } @Override - public void remove() { + public final void remove() { throw new UnsupportedOperationException("FlowGraphs are immutable, so FlowScanners can't remove nodes"); } @Override + @Nonnull public Iterator iterator() { return this; } @@ -202,6 +206,7 @@ public Iterator iterator() { * @return A {@link Filterator} against this FlowScanner, which can be filtered in additional ways. 
*/ @Override + @Nonnull public Filterator filter(Predicate filterCondition) { return new FilteratorImpl(this, filterCondition); } @@ -213,6 +218,7 @@ public Filterator filter(Predicate filterCondition) { * @param matchCondition Predicate to match when we've successfully found a given node type * @return First matching node, or null if no matches found */ + @CheckForNull public FlowNode findFirstMatch(@CheckForNull Collection heads, @CheckForNull Collection endNodes, Predicate matchCondition) { @@ -297,7 +303,7 @@ public List filteredNodes(@CheckForNull FlowNode head, @Nonnull Predic * @param blackList Nodes we can't visit or pass beyond. * @param visitor Visitor that will see each FlowNode encountered. */ - public void visitAll(@CheckForNull Collection heads, @CheckForNull Collection blackList, FlowNodeVisitor visitor) { + public void visitAll(@CheckForNull Collection heads, @CheckForNull Collection blackList, @Nonnull FlowNodeVisitor visitor) { if (!setup(heads, blackList)) { return; } @@ -310,7 +316,7 @@ public void visitAll(@CheckForNull Collection heads, @CheckForNull Col } /** Syntactic sugar for {@link #visitAll(Collection, FlowNodeVisitor)} where we don't blacklist any nodes */ - public void visitAll(@CheckForNull Collection heads, FlowNodeVisitor visitor) { + public void visitAll(@CheckForNull Collection heads, @Nonnull FlowNodeVisitor visitor) { visitAll(heads, null, visitor); } } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java index ee24aa1e..e478dcc1 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java @@ -82,7 +82,7 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection // because that's in workflow-cps plugin which depends on this one. 
if (!blackList.contains(f) && !(f instanceof BlockStartNode && visited.contains(f))) { if (output == null ) { - output = f; + output = f; // Do direct assignment rather than needless push/pop } else { queue.push(f); } @@ -92,7 +92,10 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection if (output == null && queue.size() > 0) { output = queue.pop(); } - if (output instanceof BlockStartNode) { // See above, best step towards just tracking parallel starts + + // Only BlockStartNodes, specifically ParallelStep can be the parent of multiple child nodes + // Thus they're the only nodes we need to avoid visiting multiple times by recording the visit + if (output instanceof BlockStartNode) { visited.add(output); } return output; diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java index 24c57be4..33b678a0 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java @@ -29,11 +29,15 @@ import javax.annotation.Nonnull; import java.util.Iterator; -/** Iterator that allows returned objects to be filtered against a given condition +/** Iterator that may be navigated through a filtered wrapper. + * + * As a rule, assume that returned Filterators wrap an iterator and pass calls to it. + * Thus the iterator position will change if next() is called on the filtered versions. + * Note also: you may filter a filterator, if needed. 
* @author Sam Van Oort */ public interface Filterator extends Iterator { - /** Returns a filtered view of the iterator */ + /** Returns a filtered view of the iterator, which calls the iterator until matches are found */ @Nonnull public Filterator filter(@Nonnull Predicate matchCondition); } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java index 8bc9beec..9294ac27 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java @@ -86,6 +86,6 @@ public T next() { @Override public void remove() { - throw new UnsupportedOperationException(); + wrapped.remove(); } } From 7c3179414770defd91f7719294807dbfb5aead12 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 18 May 2016 04:50:02 -0400 Subject: [PATCH 032/104] Refactor and fixes from last refactor --- .../graphanalysis/AbstractFlowScanner.java | 20 +-- .../graphanalysis/DepthFirstScanner.java | 6 +- .../workflow/graphanalysis/ForkScanner.java | 157 ++++++++++-------- .../LinearBlockHoppingScanner.java | 8 +- .../workflow/graphanalysis/LinearScanner.java | 10 +- 5 files changed, 107 insertions(+), 94 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java index f07e4de2..aeb830e3 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java @@ -25,7 +25,6 @@ package org.jenkinsci.plugins.workflow.graphanalysis; import com.google.common.base.Predicate; -import com.google.common.collect.ImmutableSet; import org.jenkinsci.plugins.workflow.flow.FlowExecution; import org.jenkinsci.plugins.workflow.graph.FlowNode; @@ -37,7 
+36,6 @@ import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashSet; -import java.util.LinkedList; import java.util.List; import java.util.NoSuchElementException; import java.util.Set; @@ -84,11 +82,11 @@ */ public abstract class AbstractFlowScanner implements Iterable , Filterator { - protected FlowNode current; + protected FlowNode myCurrent; - protected FlowNode next; + protected FlowNode myNext; - protected Collection blackList = Collections.EMPTY_SET; + protected Collection myBlackList = Collections.EMPTY_SET; /** Helper: convert stop nodes to a collection that can efficiently be checked for membership, handling null if needed */ @Nonnull @@ -158,7 +156,7 @@ public boolean setup(@CheckForNull FlowNode head) { * This method makes several assumptions: * * - {@link #reset()} has already been invoked to reset state - * - filteredHeads has already had any points in {@link #blackList} removed + * - filteredHeads has already had any points in {@link #myBlackList} removed * - none of the filteredHeads are null * @param filteredHeads Head nodes that have been filtered against blackList */ @@ -175,18 +173,18 @@ public boolean setup(@CheckForNull FlowNode head) { @Override public boolean hasNext() { - return next != null; + return myNext != null; } @Override public FlowNode next() { - if (next == null) { + if (myNext == null) { throw new NoSuchElementException(); } - current = next; - next = next(current, blackList); - return current; + myCurrent = myNext; + myNext = next(myCurrent, myBlackList); + return myCurrent; } @Override diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java index e478dcc1..9d6c272e 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java @@ -55,7 +55,7 @@ protected void 
reset() { this.queue.clear(); } this.visited.clear(); - this.current = null; + this.myCurrent = null; } @Override @@ -63,8 +63,8 @@ protected void setHeads(@Nonnull Collection heads) { Iterator it = heads.iterator(); if (it.hasNext()) { FlowNode f = it.next(); - current = f; - next = f; + myCurrent = f; + myNext = f; } while (it.hasNext()) { queue.add(it.next()); diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index 8d40412f..3be4c285 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -26,6 +26,7 @@ import org.jenkinsci.plugins.workflow.graph.BlockEndNode; import org.jenkinsci.plugins.workflow.graph.BlockStartNode; +import org.jenkinsci.plugins.workflow.graph.FlowEndNode; import org.jenkinsci.plugins.workflow.graph.FlowNode; import javax.annotation.CheckForNull; @@ -38,7 +39,6 @@ import java.util.HashSet; import java.util.List; import java.util.ListIterator; -import java.util.Objects; import java.util.Set; /** @@ -60,29 +60,41 @@ */ public class ForkScanner extends AbstractFlowScanner { - /** These are the BlockStartNodes that begin parallel blocks - * There will be one entry for every executing parallel branch in current flow - */ - ArrayDeque forkStarts = new ArrayDeque(); + // Last element in stack is end of current parallel start, first is current start + ArrayDeque parallelBlockStartStack = new ArrayDeque(); + + /** FlowNode that will terminate the current parallel block */ + FlowNode currentParallelStartNode = null; - /** FlowNode that will terminate the current parallel block */ - FlowNode currentParallelStart = null; + ParallelBlockStart currentParallelStart = null; - /** How deep are we in parallel branches, if 0 we are linear */ - protected int parallelDepth = 0; + private boolean walkingFromFinish = false; 
@Override protected void reset() { - if (_queue == null) { - _queue = new ArrayDeque(); - } else { - _queue.clear(); - } - forkStarts.clear(); - parallelDepth =0; + parallelBlockStartStack.clear(); currentParallelStart = null; - _current = null; - _next = null; + currentParallelStartNode = null; + myCurrent = null; + myNext = null; + } + + /** If true, we are walking from the flow end node and have a complete view of the flow */ + public boolean isWalkingFromFinish() { + return walkingFromFinish; + } + + /** Tracks state for parallel blocks */ + protected static class ParallelBlockStart { + protected BlockStartNode forkStart; // This is the node with child branches + protected int remainingBranches; + protected int totalBranches; + protected ArrayDeque unvisited; // Remaining branches of this that we have have not visited yet + + protected ParallelBlockStart(BlockStartNode forkStart, int branchCount) { + this.forkStart = forkStart; + this.remainingBranches = branchCount; + } } protected static abstract class FlowPiece { @@ -96,10 +108,6 @@ protected static abstract class FlowPiece { String endId; } - protected static class AtomicStep extends FlowPiece { - - } - protected static class FlowSegment extends FlowPiece { ArrayList visited = new ArrayList(); FlowPiece before; @@ -190,12 +198,6 @@ protected ForkRef(int depth, FlowNode self, FlowNode parent) { } } - /** Endpoint for a fork */ - protected static class ForkHead { - protected FlowNode head; - protected int branchCount = 0; - } - /** Accumulate all the branch references here, recursively */ private void addForkRefs(List refs, Fork myFork, int currentDepth) { List pieces = myFork.following; @@ -220,6 +222,8 @@ private void addForkRefs(List refs, Fork myFork, int currentDepth) { * @param heads */ void leastCommonAncestor(@Nonnull Set heads) { + // FIX ME: nest in the parallel blockstart nodes, as we see further back ones, add them on hte opposite side of pushing + HashMap branches = new HashMap(); ArrayList> 
iterators = new ArrayList>(); ArrayList liveHeads = new ArrayList(); @@ -281,6 +285,8 @@ void leastCommonAncestor(@Nonnull Set heads) { Collections.sort(refs); // Now we add start points + // TODO : don't use sorted ForkRef, just applying the ParallelBlockStarts as we go, pushing in the tree levels + // FIRST: we visit all nodes on the same level, with the same parent // Add refs to an @@ -294,15 +300,13 @@ protected void setHeads(@Nonnull Collection heads) { if (heads.size() > 1) { //throw new IllegalArgumentException("ForkedFlowScanner can't handle multiple head nodes yet"); leastCommonAncestor(new HashSet(heads)); + walkingFromFinish = false; + } else { + FlowNode f = heads.iterator().next(); + walkingFromFinish = f instanceof FlowEndNode; + myCurrent = f; + myNext = f; } - _current = null; - _queue.addAll(heads); - _current = _queue.poll(); - _next = _current; - } - - public int getParallelDepth() { - return parallelDepth; } /** @@ -310,35 +314,38 @@ public int getParallelDepth() { * @return */ @CheckForNull - public FlowNode getCurrentParallelStart() { - return currentParallelStart; + public FlowNode getCurrentParallelStartNode() { + return currentParallelStartNode; } /** * Invoked when we start entering a parallel block (walking from head of the flow, so we see the block end first) * @param endNode Node where parents merge (final end node for the parallel block) * @param parents Parent nodes that end here - * @return FlowNode next node to visit + * @return FlowNode myNext node to visit */ protected FlowNode hitParallelEnd(BlockEndNode endNode, List parents, Collection blackList) { - int branchesAdded = 0; BlockStartNode start = endNode.getStartNode(); - FlowNode output = null; + + ArrayDeque branches = new ArrayDeque(); for (FlowNode f : parents) { if (!blackList.contains(f)) { - // If this is the first fork, we'll walk up it, and then queue up the others - if (branchesAdded == 0) { - currentParallelStart = start; - output = f; - } else { - _queue.push(f); - 
forkStarts.push(start); - } - branchesAdded++; + branches.add(f); } } - if (branchesAdded > 0) { - parallelDepth++; + + FlowNode output = null; + if (branches.size() > 0) { // Push another branch start + ParallelBlockStart parallelBlockStart = new ParallelBlockStart(start, branches.size()); + output = branches.pop(); + parallelBlockStart.remainingBranches--; + parallelBlockStart.unvisited = branches; + + if (currentParallelStart != null) { + parallelBlockStartStack.push(currentParallelStart); + } + currentParallelStart = parallelBlockStart; + currentParallelStartNode = start; } return output; } @@ -349,23 +356,32 @@ protected FlowNode hitParallelEnd(BlockEndNode endNode, List parents, */ protected FlowNode hitParallelStart() { FlowNode output = null; - if (forkStarts.size() > 0) { // More forks (or nested parallel forks) remain - FlowNode end = forkStarts.peek(); - // Nested parallel branches, finished nested level so we visit the head and enclosing parallel block - if (end != currentParallelStart) { - parallelDepth--; - output = currentParallelStart; - } - // If the current end == currentParallelStart then we are finishing another branch of current flow - currentParallelStart = end; - } else { // We're now at the top level of the flow, having finished our last (nested) parallel fork - output = currentParallelStart; - currentParallelStart = null; - parallelDepth--; + if (currentParallelStart != null) { + if (currentParallelStart.remainingBranches-- <= 1) { // Strip off a completed branch + // We finished a nested set of parallel branches, visit the head and move up a level + output = currentParallelStartNode; + + if (parallelBlockStartStack.size() > 0) { + // Finished a nested parallel block, move up a level + currentParallelStart = parallelBlockStartStack.pop(); + currentParallelStartNode = currentParallelStart.forkStart; + } else { // At the top level, not inside any parallel block + currentParallelStart = null; + currentParallelStartNode = null; + } + } + else { 
// We're at the top + currentParallelStart = null; + currentParallelStartNode = null; + parallelBlockStartStack.pop(); + } + } else { + throw new IllegalStateException("Hit a BlockStartNode with multiple children, and no record of the start!"); } + // Handle cases where the BlockStartNode for the parallel block is blackListed - return (output != null && !_blackList.contains(output)) ? output : null; + return (output != null && !myBlackList.contains(output)) ? output : null; } @Override @@ -379,7 +395,7 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection // welp done with this node, guess we consult the queue? } else if (parents.size() == 1) { FlowNode p = parents.get(0); - if (p == currentParallelStart) { + if (p == currentParallelStartNode) { // Terminating a parallel scan FlowNode temp = hitParallelStart(); if (temp != null) { // Startnode for current parallel block now that it is done @@ -396,12 +412,11 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection return possibleOutput; } } else { - throw new IllegalStateException("Found a FlowNode with multiple parents that isn't the end of a block! "+_current.toString()); + throw new IllegalStateException("Found a FlowNode with multiple parents that isn't the end of a block! 
"+ this.myCurrent.toString()); } } - if (_queue.size() > 0) { - output = _queue.pop(); - currentParallelStart = forkStarts.pop(); + if (currentParallelStart != null && currentParallelStart.unvisited.size() > 0) { + output = currentParallelStart.unvisited.pop(); } return output; diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java index 7676339e..bc770ee5 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java @@ -34,7 +34,7 @@ import java.util.List; /** - * Extension of {@link LinearScanner} that skips nested blocks at the current level. + * Extension of {@link LinearScanner} that skips nested blocks at the myCurrent level. * ONLY use this with nodes inside the flow graph, never the last node of a completed flow (it will jump over the whole flow). * * This is useful where you only care about {@link FlowNode}s that precede this one or are part of an enclosing scope (within a Block). 
@@ -56,14 +56,14 @@ public class LinearBlockHoppingScanner extends LinearScanner { @Override public boolean setup(@CheckForNull Collection heads, @CheckForNull Collection blackList) { boolean possiblyStartable = super.setup(heads, blackList); - return possiblyStartable && current != null; // In case we start at an end block + return possiblyStartable && myCurrent != null; // In case we start at an end block } @Override protected void setHeads(@Nonnull Collection heads) { if (heads.size() > 0) { - this.current = jumpBlockScan(heads.iterator().next(), blackList); - this.next = this.current; + this.myCurrent = jumpBlockScan(heads.iterator().next(), myBlackList); + this.myNext = this.myCurrent; } } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java index c92c3381..95a5a549 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java @@ -46,16 +46,16 @@ public class LinearScanner extends AbstractFlowScanner { @Override protected void reset() { - this.current = null; - this.next = null; - this.blackList = Collections.EMPTY_SET; + this.myCurrent = null; + this.myNext = null; + this.myBlackList = Collections.EMPTY_SET; } @Override protected void setHeads(@Nonnull Collection heads) { if (heads.size() > 0) { - this.current = heads.iterator().next(); - this.next = this.current; + this.myCurrent = heads.iterator().next(); + this.myNext = this.myCurrent; } } From a1ceb70e8d15f49c577989dab1be9209df8d50a1 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 18 May 2016 18:18:48 -0400 Subject: [PATCH 033/104] Finish stripping down the ForkScanner and the core least-common-ancestor --- .../workflow/graphanalysis/ForkScanner.java | 189 +++++++----------- .../graphanalysis/FlowScannerTest.java | 66 ++++++ 2 files changed, 138 insertions(+), 117 
deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index 3be4c285..054e48e7 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -95,20 +95,23 @@ protected ParallelBlockStart(BlockStartNode forkStart, int branchCount) { this.forkStart = forkStart; this.remainingBranches = branchCount; } + + /** Strictly for internal use in the least common ancestor problem */ + ParallelBlockStart() {} } - protected static abstract class FlowPiece { - long startTime; + protected interface FlowPiece { + /*long startTime; long endTime; long pauseDuration; String statusCode; // Bounds for a block String startId; - String endId; + String endId;*/ } - protected static class FlowSegment extends FlowPiece { + protected static class FlowSegment implements FlowPiece { ArrayList visited = new ArrayList(); FlowPiece before; FlowPiece after; @@ -119,8 +122,9 @@ protected static class FlowSegment extends FlowPiece { * @param nodeMapping Mapping of BlockStartNodes to flowpieces (forks or segments) * @param forkPoint Node where the flows intersec * @param forkBranch Flow piece that is joining this + * @return Fork where split occurred */ - public void split(@Nonnull HashMap nodeMapping, @Nonnull BlockStartNode forkPoint, @Nonnull FlowPiece forkBranch) { + public Fork split(@Nonnull HashMap nodeMapping, @Nonnull BlockStartNode forkPoint, @Nonnull FlowPiece forkBranch) { int index = visited.indexOf(forkPoint); if (index < 0) { throw new IllegalStateException("Tried to split a segment where the node doesn't exist in this segment"); @@ -146,6 +150,7 @@ public void split(@Nonnull HashMap nodeMapping, @Nonnull Bl nodeMapping.put(n, newSegment); } nodeMapping.put(forkPoint, newFork); + return newFork; } public void add(FlowNode f) { @@ -153,81 +158,33 @@ 
public void add(FlowNode f) { } } - protected static class Fork extends FlowPiece { + protected static class Fork extends ParallelBlockStart implements FlowPiece { FlowPiece before; - BlockStartNode forkNode; List following = new ArrayList(); public Fork(BlockStartNode forkNode) { - this.forkNode = forkNode; - } - } - - /** References from a branch to parent, used for creating a sorted hierarchy */ - protected static class ForkRef implements Comparable { - int depth; - FlowNode self; - FlowNode parent; - - /** Sort by depth then by parents, other than that irrelevent */ - @Override - public int compareTo(ForkRef o) { - if (o == null) { - return -1; - } - if (this.depth != o.depth) { - return (this.depth - o.depth); // Deepest first, sorting in reverse order - } - return (this.parent.getId().compareTo(o.parent.getId())); - } - - public boolean equals(Object o) { - if (o == this) { - return true; - } else if (o == null || !(o instanceof ForkRef)) { - return false; - } - return o != null && o instanceof ForkRef && ((ForkRef)o).depth == this.depth && - ((ForkRef)o).self == this.self && ((ForkRef)o).parent == this.parent; + this.forkStart = forkNode; } - protected ForkRef(int depth, FlowNode self, FlowNode parent) { - this.depth = depth; - this.self = self; - this.parent = parent; - } - } + public ParallelBlockStart toSimple() { + ParallelBlockStart st = new ParallelBlockStart(); - /** Accumulate all the branch references here, recursively */ - private void addForkRefs(List refs, Fork myFork, int currentDepth) { - List pieces = myFork.following; - for (FlowPiece f : pieces) { - FlowSegment fs = (FlowSegment)f; - refs.add(new ForkRef(currentDepth+1, fs.visited.get(fs.visited.size()-1), myFork.forkNode)); - if (fs.after != null && fs.after instanceof Fork) { - addForkRefs(refs, (Fork)fs.after, currentDepth+1); - } + return st; } } - /*private void addToRefs(List refList) { - Collections.sort(refList); - for (ForkRef fr : refList) { - // Add appropriate entries to queue, 
etc - } - }*/ - /** * Constructs the tree mapping each flowNode to its nearest + * * @param heads */ - void leastCommonAncestor(@Nonnull Set heads) { - // FIX ME: nest in the parallel blockstart nodes, as we see further back ones, add them on hte opposite side of pushing - + ArrayDeque leastCommonAncestor(@Nonnull Set heads) { HashMap branches = new HashMap(); ArrayList> iterators = new ArrayList>(); ArrayList liveHeads = new ArrayList(); + ArrayDeque parallelForks = new ArrayDeque(); + for (FlowNode f : heads) { iterators.add(FlowScanningUtils.filterableEnclosingBlocks(f)); FlowSegment b = new FlowSegment(); @@ -236,12 +193,12 @@ void leastCommonAncestor(@Nonnull Set heads) { branches.put(f, b); } - // Walk through until everything has merged to one ancestor + // Walk through, merging flownodes one-by-one until everything has merged to one ancestor while (iterators.size() > 1) { ListIterator> itIterator = iterators.listIterator(); ListIterator pieceIterator = liveHeads.listIterator(); - while(itIterator.hasNext()) { + while (itIterator.hasNext()) { Filterator blockStarts = itIterator.next(); FlowSegment myPiece = pieceIterator.next(); @@ -254,13 +211,13 @@ void leastCommonAncestor(@Nonnull Set heads) { FlowNode nextHead = blockStarts.next(); FlowPiece existingBranch = branches.get(nextHead); - if (existingBranch != null) { // - // Found a case where they convert, replace with a convergent branch - if (existingBranch instanceof Fork) { - Fork f = (Fork)existingBranch; + if (existingBranch != null) { // Joining into an existing branch + // Found a case where they converge, replace with a convergent branch + if (existingBranch instanceof Fork) { // Joining an existing fork with other branches + Fork f = (Fork) existingBranch; f.following.add(myPiece); - } else { - ((FlowSegment)existingBranch).split(branches, (BlockStartNode)nextHead, myPiece); + } else { // We've hit a new fork, split the segment and add it to top the parallels (it's higher than previous ones) + 
parallelForks.add((((FlowSegment) existingBranch).split(branches, (BlockStartNode) nextHead, myPiece))); } itIterator.remove(); pieceIterator.remove(); @@ -271,28 +228,28 @@ void leastCommonAncestor(@Nonnull Set heads) { } } - // Add the ancestry to the forks, note that we alternate fork-flowsegment-fork - ArrayList refs = new ArrayList(); - ArrayDeque children = new ArrayDeque(); - children.add((Fork)liveHeads.get(0).after); - while (children.size() > 0) { - Fork child = children.pop(); - if (child.following != null && child.following.size() > 0) { - // ad dthe fork child and its forks + // Walk through and convert forks to parallel block starts, and find heads that point to them + ArrayDeque output = new ArrayDeque(); + for (Fork f : parallelForks) { + // Do processing to assign heads to flowsegments + ParallelBlockStart start = new ParallelBlockStart(); + start.totalBranches = f.following.size(); + start.forkStart = f.forkStart; + start.remainingBranches = start.totalBranches; + + // Add the nodes to the parallel starts here + for (FlowPiece fp : f.following) { + if (fp instanceof FlowSegment) { + FlowSegment fs = (FlowSegment)fp; + if (fs.after == null) { // Ends in a head, not a fork + start.unvisited = new ArrayDeque(); + start.unvisited.add(fs.visited.get(0)); + } + } } - + output.add(start); } - Collections.sort(refs); - // Now we add start points - - // TODO : don't use sorted ForkRef, just applying the ParallelBlockStarts as we go, pushing in the tree levels - - - // FIRST: we visit all nodes on the same level, with the same parent - // Add refs to an - // Then we visit their parents - - + return output; } @Override @@ -338,6 +295,7 @@ protected FlowNode hitParallelEnd(BlockEndNode endNode, List parents, if (branches.size() > 0) { // Push another branch start ParallelBlockStart parallelBlockStart = new ParallelBlockStart(start, branches.size()); output = branches.pop(); + parallelBlockStart.totalBranches = parents.size(); 
parallelBlockStart.remainingBranches--; parallelBlockStart.unvisited = branches; @@ -358,7 +316,7 @@ protected FlowNode hitParallelStart() { FlowNode output = null; if (currentParallelStart != null) { - if (currentParallelStart.remainingBranches-- <= 1) { // Strip off a completed branch + if (--(currentParallelStart.remainingBranches) <= 0) { // Strip off a completed branch // We finished a nested set of parallel branches, visit the head and move up a level output = currentParallelStartNode; @@ -370,8 +328,7 @@ protected FlowNode hitParallelStart() { currentParallelStart = null; currentParallelStartNode = null; } - } - else { // We're at the top + } else { // We're at the top level (so far) currentParallelStart = null; currentParallelStartNode = null; parallelBlockStartStack.pop(); @@ -389,36 +346,34 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection FlowNode output = null; // First we look at the parents of the current node if present - if (current != null) { - List parents = current.getParents(); - if (parents == null || parents.size() == 0) { - // welp done with this node, guess we consult the queue? - } else if (parents.size() == 1) { - FlowNode p = parents.get(0); - if (p == currentParallelStartNode) { - // Terminating a parallel scan - FlowNode temp = hitParallelStart(); - if (temp != null) { // Startnode for current parallel block now that it is done - return temp; - } - } else if (!blackList.contains(p)) { - return p; - } - } else if (current instanceof BlockEndNode && parents.size() > 1) { - // We must be a BlockEndNode that begins this - BlockEndNode end = ((BlockEndNode) current); - FlowNode possibleOutput = hitParallelEnd(end, parents, blackList); // What if output is block but other branches aren't? - if (possibleOutput != null) { - return possibleOutput; + List parents = current.getParents(); + if (parents == null || parents.size() == 0) { + // welp done with this node, guess we consult the queue? 
+ } else if (parents.size() == 1) { + FlowNode p = parents.get(0); + if (p == currentParallelStartNode) { + // Terminating a parallel scan + FlowNode temp = hitParallelStart(); + if (temp != null) { // Startnode for current parallel block now that it is done + return temp; } - } else { - throw new IllegalStateException("Found a FlowNode with multiple parents that isn't the end of a block! "+ this.myCurrent.toString()); + } else if (!blackList.contains(p)) { + return p; } + } else if (current instanceof BlockEndNode && parents.size() > 1) { + // We must be a BlockEndNode that begins this + BlockEndNode end = ((BlockEndNode) current); + FlowNode possibleOutput = hitParallelEnd(end, parents, blackList); // What if output is block but other branches aren't? + if (possibleOutput != null) { + return possibleOutput; + } + } else { + throw new IllegalStateException("Found a FlowNode with multiple parents that isn't the end of a block! "+ this.myCurrent.toString()); } + if (currentParallelStart != null && currentParallelStart.unvisited.size() > 0) { output = currentParallelStart.unvisited.pop(); } - return output; } } diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java index 1889e315..061a71fe 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java @@ -425,4 +425,70 @@ public void testNestedParallelScan() throws Exception { matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); Assert.assertEquals(7, matches.size()); } + + /** Unit tests for the innards of the ForkScanner */ + @Test + public void testForkedScanner() throws Exception { + + WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "Convoluted"); + job.setDefinition(new CpsFlowDefinition( + "echo 'first'\n" + + "def steps = [:]\n" + + "steps['1'] = {\n" + + 
" echo 'do 1 stuff'\n" + + "}\n" + + "steps['2'] = {\n" + + " echo '2a'\n" + + " echo '2b'\n" + + "}\n" + + "parallel steps\n" + + "echo 'final'" + )); + + /** Flow structure (ID - type) + 2 - FlowStartNode (BlockStartNode) + 3 - Echostep + 4 - ParallelStep (StepStartNode) (start branches) + 6 - ParallelStep (StepStartNode) (start branch 1), ParallelLabelAction with branchname=1 + 7 - ParallelStep (StepStartNode) (start branch 2), ParallelLabelAction with branchname=2 + 8 - EchoStep, (branch 1) parent=6 + 9 - StepEndNode, (end branch 1) startId=6, parentId=8 + 10 - EchoStep, (branch 2) parentId=7 + 11 - EchoStep, (branch 2) parentId = 10 + 12 - StepEndNode (end branch 2) startId=7 parentId=11, + 13 - StepEndNode (close branches), parentIds = 9,12, startId=4 + 14 - EchoStep + 15 - FlowEndNode (BlockEndNode) + */ + + WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); + FlowExecution exec = b.getExecution(); + Collection heads = b.getExecution().getCurrentHeads(); + + // Initial case + ForkScanner scanner = new ForkScanner(); + scanner.setup(heads, null); + Assert.assertNull(scanner.currentParallelStart); + Assert.assertNull(scanner.currentParallelStartNode); + Assert.assertNotNull(scanner.parallelBlockStartStack); + Assert.assertEquals(0, scanner.parallelBlockStartStack.size()); + Assert.assertTrue(scanner.isWalkingFromFinish()); + + // Fork case + scanner.setup(exec.getNode("13")); + Assert.assertFalse(scanner.isWalkingFromFinish()); + Assert.assertEquals("13", scanner.next().getId()); + Assert.assertNotNull(scanner.parallelBlockStartStack); + Assert.assertEquals(0, scanner.parallelBlockStartStack.size()); + Assert.assertEquals(exec.getNode("4"), scanner.currentParallelStartNode); + + ForkScanner.ParallelBlockStart start = scanner.currentParallelStart; + Assert.assertEquals(2, start.totalBranches); + Assert.assertEquals(2, start.remainingBranches); + Assert.assertEquals(0, start.unvisited.size()); + Assert.assertEquals(exec.getNode("4"), 
start.forkStart); + + Assert.assertEquals(exec.getNode("9"), scanner.next()); + + } } \ No newline at end of file From 11be993d4c14363e7d8287594c73b24b817fd0b3 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 18 May 2016 18:35:28 -0400 Subject: [PATCH 034/104] Somewhat better behaved version of ForkScanner, just need to fix blacklisting --- .../plugins/workflow/graphanalysis/ForkScanner.java | 6 +----- .../workflow/graphanalysis/FlowScannerTest.java | 11 ++++++++--- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index 054e48e7..50f9b246 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -316,7 +316,7 @@ protected FlowNode hitParallelStart() { FlowNode output = null; if (currentParallelStart != null) { - if (--(currentParallelStart.remainingBranches) <= 0) { // Strip off a completed branch + if ((currentParallelStart.remainingBranches--) <= 0) { // Strip off a completed branch // We finished a nested set of parallel branches, visit the head and move up a level output = currentParallelStartNode; @@ -328,10 +328,6 @@ protected FlowNode hitParallelStart() { currentParallelStart = null; currentParallelStartNode = null; } - } else { // We're at the top level (so far) - currentParallelStart = null; - currentParallelStartNode = null; - parallelBlockStartStack.pop(); } } else { throw new IllegalStateException("Hit a BlockStartNode with multiple children, and no record of the start!"); diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java index 061a71fe..d592f856 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java +++ 
b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java @@ -26,6 +26,7 @@ import com.google.common.base.Predicate; import com.google.common.base.Predicates; +import com.sun.tools.javac.comp.Flow; import org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition; import org.jenkinsci.plugins.workflow.cps.nodes.StepAtomNode; import org.jenkinsci.plugins.workflow.flow.FlowExecution; @@ -484,11 +485,15 @@ public void testForkedScanner() throws Exception { ForkScanner.ParallelBlockStart start = scanner.currentParallelStart; Assert.assertEquals(2, start.totalBranches); - Assert.assertEquals(2, start.remainingBranches); - Assert.assertEquals(0, start.unvisited.size()); + Assert.assertEquals(1, start.remainingBranches); + Assert.assertEquals(1, start.unvisited.size()); Assert.assertEquals(exec.getNode("4"), start.forkStart); Assert.assertEquals(exec.getNode("9"), scanner.next()); - + Assert.assertEquals(exec.getNode("8"), scanner.next()); + Assert.assertEquals(exec.getNode("6"), scanner.next()); + FlowNode f = scanner.next(); + Assert.assertEquals(exec.getNode("12"), f); + } } \ No newline at end of file From b1f09761c7c28a6a4ccc2350553ce5b19e10c613 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Thu, 19 May 2016 19:54:54 -0400 Subject: [PATCH 035/104] Fix blacklisting, remove unused fields of ForkScanner objects --- .../workflow/graphanalysis/AbstractFlowScanner.java | 2 +- .../plugins/workflow/graphanalysis/ForkScanner.java | 13 +------------ .../workflow/graphanalysis/FlowScannerTest.java | 7 ------- 3 files changed, 2 insertions(+), 20 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java index aeb830e3..36282a35 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java @@ -121,7 
+121,7 @@ public boolean setup(@CheckForNull Collection heads, @CheckForNull Col } reset(); - blackList = fastEndNodes; + myBlackList = fastEndNodes; setHeads(filteredHeads); return true; } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index 50f9b246..659b54f6 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -101,19 +101,11 @@ protected ParallelBlockStart(BlockStartNode forkStart, int branchCount) { } protected interface FlowPiece { - /*long startTime; - long endTime; - long pauseDuration; - String statusCode; - - // Bounds for a block - String startId; - String endId;*/ + // Marker interface for now } protected static class FlowSegment implements FlowPiece { ArrayList visited = new ArrayList(); - FlowPiece before; FlowPiece after; /** @@ -135,11 +127,9 @@ public Fork split(@Nonnull HashMap nodeMapping, @Nonnull Bl Fork newFork = new Fork(forkPoint); FlowSegment newSegment = new FlowSegment(); newSegment.after = this.after; - newSegment.before = newFork; if (visited.size() > index+1) { newSegment.visited.addAll(index+1, visited); } - newFork.before = this; newFork.following.add(forkBranch); newFork.following.add(newSegment); this.after = newFork; @@ -159,7 +149,6 @@ public void add(FlowNode f) { } protected static class Fork extends ParallelBlockStart implements FlowPiece { - FlowPiece before; List following = new ArrayList(); public Fork(BlockStartNode forkNode) { diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java index d592f856..06783f73 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java +++ 
b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java @@ -26,16 +26,10 @@ import com.google.common.base.Predicate; import com.google.common.base.Predicates; -import com.sun.tools.javac.comp.Flow; import org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition; import org.jenkinsci.plugins.workflow.cps.nodes.StepAtomNode; import org.jenkinsci.plugins.workflow.flow.FlowExecution; import org.jenkinsci.plugins.workflow.graph.FlowNode; -import org.jenkinsci.plugins.workflow.graphanalysis.DepthFirstScanner; -import org.jenkinsci.plugins.workflow.graphanalysis.FlowNodeVisitor; -import org.jenkinsci.plugins.workflow.graphanalysis.ForkScanner; -import org.jenkinsci.plugins.workflow.graphanalysis.LinearBlockHoppingScanner; -import org.jenkinsci.plugins.workflow.graphanalysis.LinearScanner; import org.jenkinsci.plugins.workflow.job.WorkflowJob; import org.jenkinsci.plugins.workflow.job.WorkflowRun; import org.jenkinsci.plugins.workflow.steps.StepDescriptor; @@ -45,7 +39,6 @@ import org.junit.Test; import org.jvnet.hudson.test.BuildWatcher; import org.jvnet.hudson.test.JenkinsRule; -import org.jenkinsci.plugins.workflow.graphanalysis.AbstractFlowScanner; import javax.annotation.Nonnull; import java.util.ArrayList; From e598a91b965a87ff3abdad2c899a4d2114cbbe05 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 24 May 2016 10:43:19 -0400 Subject: [PATCH 036/104] Add suggested review API, add a missing NotNull, explain the null handling behavior in the abstract APIs --- .../graphanalysis/AbstractFlowScanner.java | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java index 36282a35..bb8ddc6d 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java +++ 
b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java @@ -103,6 +103,7 @@ protected Collection convertToFastCheckable(@CheckForNull Collection heads, @CheckForNull Col return true; } + /** + * Helper: version of {@link #setup(Collection, Collection)} where we don't have any nodes to blacklist + */ + public boolean setup(@CheckForNull Collection heads) { + if (heads == null) { + return false; + } + return setup(heads, Collections.EMPTY_SET); + } + /** * Helper: version of {@link #setup(Collection, Collection)} where we don't have any nodes to blacklist, and have just a single head */ @@ -205,12 +216,13 @@ public Iterator iterator() { */ @Override @Nonnull - public Filterator filter(Predicate filterCondition) { + public Filterator filter(@Nonnull Predicate filterCondition) { return new FilteratorImpl(this, filterCondition); } /** * Find the first FlowNode within the iteration order matching a given condition + * Includes null-checking on arguments to allow directly calling with unchecked inputs (simplifies use). * @param heads Head nodes to start walking from * @param endNodes * @param matchCondition Predicate to match when we've successfully found a given node type @@ -257,7 +269,8 @@ public FlowNode findFirstMatch(@CheckForNull FlowExecution exec, @Nonnull Predic /** * Return a filtered list of {@link FlowNode}s matching a condition, in the order encountered. - * @param heads Nodes to start iterating backward from by visiting their parents + * Includes null-checking on arguments to allow directly calling with unchecked inputs (simplifies use). + * @param heads Nodes to start iterating backward from by visiting their parents. * @param blackList Nodes we may not visit or walk beyond. * @param matchCondition Predicate that must be met for nodes to be included in output. * @return List of flownodes matching the predicate. 
@@ -291,9 +304,9 @@ public List filteredNodes(@CheckForNull FlowNode head, @Nonnull Predic return this.filteredNodes(Collections.singleton(head), null, matchPredicate); } - /** * Given a {@link FlowNodeVisitor}, invoke {@link FlowNodeVisitor#visit(FlowNode)} on each node and halt early if it returns false. + * Includes null-checking on all but the visitor, to allow directly calling with unchecked inputs (simplifies use). * * Useful if you wish to collect some information from every node in the FlowGraph. * To do that, accumulate internal state in the visitor, and invoke a getter when complete. From 7173c9c2a909b6d9a10edef7039472f2d0fc23f3 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 24 May 2016 10:44:49 -0400 Subject: [PATCH 037/104] LinearFlowScanner test is deterministic --- .../plugins/workflow/graphanalysis/FlowScannerTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java index 06783f73..259d5626 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java @@ -341,7 +341,7 @@ public void testParallelScan() throws Exception { AbstractFlowScanner scanner = new LinearScanner(); Collection matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); - Assert.assertTrue(matches.size() == 3 || matches.size() == 4); // Depending on ordering + Assert.assertEquals(3, matches.size()); scanner = new DepthFirstScanner(); matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); From 27070594cd672192bfbf5485f1a678deb35f11cc Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 24 May 2016 22:54:09 -0400 Subject: [PATCH 038/104] Add megatest for FlowScanner abstract functionality This allows implementation tests to mostly cover iteration/blacklist use --- 
.../graphanalysis/AbstractFlowScanner.java | 5 +- .../graphanalysis/FlowScannerTest.java | 191 ++++++++++++++++-- 2 files changed, 176 insertions(+), 20 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java index bb8ddc6d..68d0fa47 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java @@ -88,6 +88,9 @@ public abstract class AbstractFlowScanner implements Iterable , Filter protected Collection myBlackList = Collections.EMPTY_SET; + /** When checking for blacklist membership, we convert to a hashset when checking more than this many elements */ + protected final int MAX_LIST_CHECK_SIZE = 5; + /** Helper: convert stop nodes to a collection that can efficiently be checked for membership, handling null if needed */ @Nonnull protected Collection convertToFastCheckable(@CheckForNull Collection nodeCollection) { @@ -98,7 +101,7 @@ protected Collection convertToFastCheckable(@CheckForNull Collection 5 ? new HashSet(nodeCollection) : nodeCollection; + return nodeCollection.size() > MAX_LIST_CHECK_SIZE ? 
new HashSet(nodeCollection) : nodeCollection; } /** diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java index 259d5626..f46b0139 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java @@ -41,11 +41,16 @@ import org.jvnet.hudson.test.JenkinsRule; import javax.annotation.Nonnull; +import java.util.AbstractSet; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.List; +import java.util.NoSuchElementException; +import java.util.TreeSet; public class FlowScannerTest { @@ -89,6 +94,167 @@ public ArrayList getVisited() { } }; + /** Assert node ordering using their ids */ + public void assertNodeOrder(String description, Iterable nodes, String... nodeIds) { + ArrayList realIds = new ArrayList(); + for (FlowNode f: nodes) { + Assert.assertNotNull(f); + realIds.add(f.getId()); + } + Assert.assertArrayEquals(description, nodeIds, realIds.toArray()); + } + + /** Assert node ordering using iotas for their ids */ + public void assertNodeOrder(String description, Iterable nodes, int... 
nodeIds) { + String[] nodeIdStrings = new String[nodeIds.length]; + for (int i=0; i heads = exec.getCurrentHeads(); + FlowNode intermediateNode = exec.getNode("4"); + AbstractFlowScanner linear = new LinearScanner(); + + // ## Bunch of tests for convertToFastCheckable ## + Collection coll = linear.convertToFastCheckable(null); + Assert.assertEquals(Collections.EMPTY_SET, linear.convertToFastCheckable(null)); + Assert.assertEquals(Collections.EMPTY_SET, linear.convertToFastCheckable(new ArrayList())); + + coll = linear.convertToFastCheckable(Arrays.asList(intermediateNode)); + Assert.assertTrue("Singleton set used for one element", coll instanceof AbstractSet); + Assert.assertEquals(1, coll.size()); + + Collection multipleItems = Arrays.asList(exec.getNode("3"), exec.getNode("2")); + coll = linear.convertToFastCheckable(multipleItems); + Assert.assertTrue("Original used for short list", coll instanceof List); + Assert.assertEquals(2, coll.size()); + + coll = linear.convertToFastCheckable(new LinkedHashSet(multipleItems)); + Assert.assertTrue("Original used where set", coll instanceof LinkedHashSet); + + multipleItems = new ArrayList(); + for (int i=0; i < 3; i++) { + multipleItems.add(intermediateNode); + } + coll = linear.convertToFastCheckable(multipleItems); + Assert.assertTrue("Original used for short list", coll instanceof List); + Assert.assertEquals(3, coll.size()); + + multipleItems = new ArrayList(); + for (int i=0; i < 10; i++) { + multipleItems.add(intermediateNode); + } + coll = linear.convertToFastCheckable(multipleItems); + Assert.assertTrue("Original used for short list", coll instanceof HashSet); + Assert.assertEquals(1, coll.size()); + + + // Setup, return false if no nodes to iterate, else true + FlowNode lastNode = heads.get(0); + FlowNode nullNode = null; + Collection nullColl = null; + + Assert.assertTrue(linear.setup(heads, null)); + Assert.assertTrue(linear.setup(heads, Collections.EMPTY_SET)); + Assert.assertFalse(linear.setup(nullColl, 
heads)); + Assert.assertFalse(linear.setup(nullColl, null)); + Assert.assertFalse(linear.setup(heads, heads)); + Assert.assertTrue(linear.setup(heads)); + Assert.assertFalse(linear.setup(nullColl)); + Assert.assertFalse(linear.setup(Collections.EMPTY_SET)); + Assert.assertTrue(linear.setup(lastNode)); + Assert.assertTrue(linear.setup(lastNode, nullColl)); + Assert.assertFalse(linear.setup(nullNode)); + Assert.assertFalse(linear.setup(nullNode, heads)); + Assert.assertFalse(linear.setup(nullNode, nullColl)); + Assert.assertTrue(linear.setup(Arrays.asList(intermediateNode, lastNode), Collections.singleton(intermediateNode))); + Assert.assertEquals(lastNode, linear.myCurrent); + + // First match, with no blacklist + int[] ids = {6, 5, 4, 3, 2}; + FlowNode firstEchoNode = exec.getNode("5"); + FlowExecution nullExecution = null; + + Assert.assertEquals(firstEchoNode, linear.findFirstMatch(heads, Collections.EMPTY_LIST, MATCH_ECHO_STEP)); + Assert.assertEquals(firstEchoNode, linear.findFirstMatch(heads, MATCH_ECHO_STEP)); + Assert.assertEquals(firstEchoNode, linear.findFirstMatch(lastNode, MATCH_ECHO_STEP)); + Assert.assertEquals(firstEchoNode, linear.findFirstMatch(exec, MATCH_ECHO_STEP)); + Assert.assertEquals(null, linear.findFirstMatch(nullColl, MATCH_ECHO_STEP)); + Assert.assertEquals(null, linear.findFirstMatch(Collections.EMPTY_SET, MATCH_ECHO_STEP)); + Assert.assertEquals(null, linear.findFirstMatch(nullNode, MATCH_ECHO_STEP)); + Assert.assertEquals(null, linear.findFirstMatch(nullExecution, MATCH_ECHO_STEP)); + + + // Filtered nodes + assertNodeOrder("Filtered echo nodes", linear.filteredNodes(heads, MATCH_ECHO_STEP), 5, 4); + assertNodeOrder("Filtered echo nodes", linear.filteredNodes(heads, Collections.singleton(intermediateNode), MATCH_ECHO_STEP), 5); + Assert.assertEquals(0, linear.filteredNodes(heads, null, (Predicate) Predicates.alwaysFalse()).size()); + Assert.assertEquals(0, linear.filteredNodes(nullNode, MATCH_ECHO_STEP).size()); + 
Assert.assertEquals(0, linear.filteredNodes(Collections.EMPTY_SET, MATCH_ECHO_STEP).size()); + + // Same filter using the filterator + linear.setup(heads); + ArrayList collected = new ArrayList(); + Filterator filt = linear.filter(MATCH_ECHO_STEP); + while (filt.hasNext()) { + collected.add(filt.next()); + } + assertNodeOrder("Filterator filtered echo nodes", collected, 5, 4); + + + // Visitor pattern tests + CollectingVisitor visitor = new CollectingVisitor(); + linear.visitAll(Collections.EMPTY_SET, null); + Assert.assertEquals(0, visitor.getVisited().size()); + + linear.visitAll(heads, visitor); + assertNodeOrder("Visiting all nodes", visitor.getVisited(), 6, 5, 4, 3, 2); + + // And visiting with blacklist + visitor.visited.clear(); + linear.visitAll(heads, Collections.singleton(intermediateNode), visitor); + assertNodeOrder("Visiting all nodes with blacklist", visitor.getVisited(), 6, 5); + + // Tests for edge cases of the various basic APIs + linear.myNext = null; + Assert.assertFalse(linear.hasNext()); + try { + linear.next(); + Assert.fail("Should throw NoSuchElement exception"); + } catch (NoSuchElementException nsee) { + // Passing case + } + Assert.assertTrue(linear.iterator() == linear); + try { + linear.remove(); + Assert.fail("Should throw UnsupportedOperation exception"); + } catch (UnsupportedOperationException usoe) { + // Passing case + } + } + /** Tests the basic scan algorithm, predicate use, start/stop nodes */ @Test public void testSimpleScan() throws Exception { @@ -118,38 +284,25 @@ public void testSimpleScan() throws Exception { // Iteration tests for (AbstractFlowScanner scan : scans) { - System.out.println("Iteration test with scanner: "+scan.getClass()); + System.out.println("Iteration test with scanner: " + scan.getClass()); scan.setup(heads, null); - - for (int i=6; i>2; i--) { - Assert.assertTrue(scan.hasNext()); - FlowNode f = scan.next(); - Assert.assertEquals(Integer.toString(i), f.getId()); - } - - FlowNode f2 = scan.next(); + 
assertNodeOrder("Testing linear scan for scanner " + scan.getClass(), scan, 6, 5, 4, 3, 2); Assert.assertFalse(scan.hasNext()); - Assert.assertEquals("2", f2.getId()); + + // Blacklist tests } - // Block Hopping tests + // Block Hopping tests, since they're a specialty LinearBlockHoppingScanner scanner = new LinearBlockHoppingScanner(); Assert.assertFalse("BlockHopping scanner jumps over the flow when started at end", scanner.setup(heads, Collections.EMPTY_SET)); List collectedNodes = scanner.filteredNodes(Collections.singleton(exec.getNode("5")), null, (Predicate)Predicates.alwaysTrue()); - Assert.assertEquals(exec.getNode("5"), collectedNodes.get(0)); - Assert.assertEquals(exec.getNode("4"), collectedNodes.get(1)); - Assert.assertEquals(exec.getNode("3"), collectedNodes.get(2)); - Assert.assertEquals(exec.getNode("2"), collectedNodes.get(3)); + assertNodeOrder("Block hopping from just inside the end", collectedNodes, 5, 4, 3, 2); // Test expected scans with no stop nodes given (different ways of specifying none) for (AbstractFlowScanner sa : scans) { System.out.println("Testing class: "+sa.getClass()); FlowNode node = sa.findFirstMatch(heads, null, MATCH_ECHO_STEP); Assert.assertEquals(exec.getNode("5"), node); - node = sa.findFirstMatch(heads, Collections.EMPTY_LIST, MATCH_ECHO_STEP); - Assert.assertEquals(exec.getNode("5"), node); - node = sa.findFirstMatch(heads, Collections.EMPTY_SET, MATCH_ECHO_STEP); - Assert.assertEquals(exec.getNode("5"), node); Collection nodeList = sa.filteredNodes(heads, null, MATCH_ECHO_STEP); FlowNode[] expected = new FlowNode[]{exec.getNode("5"), exec.getNode("4")}; From 21c6623ed465e9cc54261f6c2711ebf5624cb40c Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 24 May 2016 23:58:15 -0400 Subject: [PATCH 039/104] Harden the flowscanner tests for ordering, clean up --- .../graphanalysis/FlowScannerTest.java | 180 ++++-------------- 1 file changed, 40 insertions(+), 140 deletions(-) diff --git 
a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java index f46b0139..09a15e51 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java @@ -286,74 +286,14 @@ public void testSimpleScan() throws Exception { for (AbstractFlowScanner scan : scans) { System.out.println("Iteration test with scanner: " + scan.getClass()); scan.setup(heads, null); - assertNodeOrder("Testing linear scan for scanner " + scan.getClass(), scan, 6, 5, 4, 3, 2); + assertNodeOrder("Testing full scan for scanner " + scan.getClass(), scan, 6, 5, 4, 3, 2); Assert.assertFalse(scan.hasNext()); // Blacklist tests - } - - // Block Hopping tests, since they're a specialty - LinearBlockHoppingScanner scanner = new LinearBlockHoppingScanner(); - Assert.assertFalse("BlockHopping scanner jumps over the flow when started at end", scanner.setup(heads, Collections.EMPTY_SET)); - List collectedNodes = scanner.filteredNodes(Collections.singleton(exec.getNode("5")), null, (Predicate)Predicates.alwaysTrue()); - assertNodeOrder("Block hopping from just inside the end", collectedNodes, 5, 4, 3, 2); - - // Test expected scans with no stop nodes given (different ways of specifying none) - for (AbstractFlowScanner sa : scans) { - System.out.println("Testing class: "+sa.getClass()); - FlowNode node = sa.findFirstMatch(heads, null, MATCH_ECHO_STEP); - Assert.assertEquals(exec.getNode("5"), node); - - Collection nodeList = sa.filteredNodes(heads, null, MATCH_ECHO_STEP); - FlowNode[] expected = new FlowNode[]{exec.getNode("5"), exec.getNode("4")}; - Assert.assertArrayEquals(expected, nodeList.toArray()); - nodeList = sa.filteredNodes(heads, Collections.EMPTY_LIST, MATCH_ECHO_STEP); - Assert.assertArrayEquals(expected, nodeList.toArray()); - nodeList = sa.filteredNodes(heads, 
Collections.EMPTY_SET, MATCH_ECHO_STEP); - Assert.assertArrayEquals(expected, nodeList.toArray()); - } - - // Test with no matches - for (AbstractFlowScanner sa : scans) { - System.out.println("Testing class: "+sa.getClass()); - FlowNode node = sa.findFirstMatch(heads, null, (Predicate)Predicates.alwaysFalse()); - Assert.assertNull(node); - - Collection nodeList = sa.filteredNodes(heads, null, (Predicate) Predicates.alwaysFalse()); - Assert.assertNotNull(nodeList); - Assert.assertEquals(0, nodeList.size()); - } - - - CollectingVisitor vis = new CollectingVisitor(); - // Verify we touch head and foot nodes too - for (AbstractFlowScanner sa : scans) { - System.out.println("Testing class: " + sa.getClass()); - Collection nodeList = sa.filteredNodes(heads, null, (Predicate) Predicates.alwaysTrue()); - vis.reset(); - sa.visitAll(heads, vis); - Assert.assertEquals(5, nodeList.size()); - Assert.assertEquals(5, vis.getVisited().size()); - } - - // Test with a stop node given, sometimes no matches - Collection noMatchEndNode = Collections.singleton(exec.getNode("5")); - Collection singleMatchEndNode = Collections.singleton(exec.getNode("4")); - for (AbstractFlowScanner sa : scans) { - FlowNode node = sa.findFirstMatch(heads, noMatchEndNode, MATCH_ECHO_STEP); - Assert.assertNull(node); - - Collection nodeList = sa.filteredNodes(heads, noMatchEndNode, MATCH_ECHO_STEP); - Assert.assertNotNull(nodeList); - Assert.assertEquals(0, nodeList.size()); - - // Now we try with a stop list the reduces node set for multiple matches - node = sa.findFirstMatch(heads, singleMatchEndNode, MATCH_ECHO_STEP); - Assert.assertEquals(exec.getNode("5"), node); - nodeList = sa.filteredNodes(heads, singleMatchEndNode, MATCH_ECHO_STEP); - Assert.assertNotNull(nodeList); - Assert.assertEquals(1, nodeList.size()); - Assert.assertEquals(exec.getNode("5"), nodeList.iterator().next()); + scan.setup(heads, Collections.singleton(exec.getNode("4"))); + assertNodeOrder("Testing full scan for scanner " + 
scan.getClass(), scan, 6, 5); + FlowNode f = scan.findFirstMatch(heads, Collections.singleton(exec.getNode("6")), (Predicate)Predicates.alwaysTrue()); + Assert.assertNull(f); } } @@ -385,72 +325,35 @@ public void testBasicScanWithBlock() throws Exception { WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); Predicate matchEchoStep = predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); FlowExecution exec = b.getExecution(); + Collection heads = exec.getCurrentHeads(); // Linear analysis LinearScanner linearScanner = new LinearScanner(); - Assert.assertEquals(3, linearScanner.filteredNodes(exec.getCurrentHeads(), null, matchEchoStep).size()); - Assert.assertEquals(3, linearScanner.filteredNodes(exec.getNode("7"), matchEchoStep).size()); + linearScanner.setup(heads); + assertNodeOrder("Linear scan with block", linearScanner, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2); + linearScanner.setup(exec.getNode("7")); + assertNodeOrder("Linear scan with block from middle ", linearScanner, 7, 6, 5, 4, 3, 2); - // Test blockhopping LinearBlockHoppingScanner linearBlockHoppingScanner = new LinearBlockHoppingScanner(); - Assert.assertEquals(0, linearBlockHoppingScanner.filteredNodes(exec.getCurrentHeads(), null, matchEchoStep).size()); //Hopped - Assert.assertEquals(1, linearBlockHoppingScanner.filteredNodes(exec.getNode("8"), matchEchoStep).size()); - Assert.assertEquals(3, linearBlockHoppingScanner.filteredNodes(exec.getNode("7"), matchEchoStep).size()); - - // Prove we covered all - DepthFirstScanner depthFirstScanner = new DepthFirstScanner(); - Assert.assertEquals(3, depthFirstScanner.filteredNodes(exec.getCurrentHeads(), null, matchEchoStep).size()); - Assert.assertEquals(3, depthFirstScanner.filteredNodes(exec.getNode("7"), matchEchoStep).size()); - - // Prove we covered all - ForkScanner forkScanner = new ForkScanner(); - Assert.assertEquals(3, forkScanner.filteredNodes(exec.getCurrentHeads(), null, matchEchoStep).size()); - 
Assert.assertEquals(3, forkScanner.filteredNodes(exec.getNode("7"), matchEchoStep).size()); - } - - @Test - public void blockJumpTest() throws Exception { - WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "BlockUsing"); - job.setDefinition(new CpsFlowDefinition( - "echo 'sample'\n" + - "node {\n" + - " echo 'inside node' \n" + - "}" - )); - - /** Flow structure (ID - type) - 2 - FlowStartNode (BlockStartNode) - 3 - Echostep - 4 - ExecutorStep (StepStartNode) - WorkspaceAction - 5 - ExecutorStep (StepStartNode) - BodyInvocationAction - 6 - Echostep - 7 - StepEndNode - startId (5) - 8 - StepEndNode - startId (4) - 9 - FlowEndNode - */ - - WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); - Collection heads = b.getExecution().getCurrentHeads(); - FlowExecution exec = b.getExecution(); - - LinearBlockHoppingScanner hopper = new LinearBlockHoppingScanner(); - FlowNode headCandidate = exec.getNode("7"); - Assert.assertEquals(exec.getNode("4"), hopper.jumpBlockScan(headCandidate, Collections.EMPTY_SET)); - Assert.assertTrue("Setup should return true if we can iterate", hopper.setup(headCandidate, null)); - - headCandidate = exec.getNode("6"); - List filtered = hopper.filteredNodes(headCandidate, MATCH_ECHO_STEP); - Assert.assertEquals(2, filtered.size()); - - headCandidate = exec.getNode("7"); - filtered = hopper.filteredNodes(Collections.singleton(headCandidate), null, MATCH_ECHO_STEP); - Assert.assertEquals(1, filtered.size()); - filtered = hopper.filteredNodes(Collections.singleton(exec.getNode("8")), null, MATCH_ECHO_STEP); - Assert.assertEquals(1, filtered.size()); - - filtered = hopper.filteredNodes(Collections.singleton(exec.getNode("9")), null, MATCH_ECHO_STEP); - Assert.assertEquals(0, filtered.size()); + // // Test block jump core + FlowNode headCandidate = exec.getNode("8"); + Assert.assertEquals(exec.getNode("4"), linearBlockHoppingScanner.jumpBlockScan(headCandidate, Collections.EMPTY_SET)); + Assert.assertTrue("Setup should 
return true if we can iterate", linearBlockHoppingScanner.setup(headCandidate, null)); + + // Test the actual iteration + linearBlockHoppingScanner.setup(heads); + Assert.assertFalse(linearBlockHoppingScanner.hasNext()); + linearBlockHoppingScanner.setup(exec.getNode("8")); + assertNodeOrder("Hopping over one block", linearBlockHoppingScanner, 4, 3, 2); + linearBlockHoppingScanner.setup(exec.getNode("7")); + assertNodeOrder("Hopping over one block", linearBlockHoppingScanner, 7, 6, 5, 4, 3, 2); + + // Test the black list in combination with hopping + linearBlockHoppingScanner.setup(exec.getNode("8"), Collections.singleton(exec.getNode("5"))); + Assert.assertFalse(linearBlockHoppingScanner.hasNext()); + linearBlockHoppingScanner.setup(exec.getNode("8"), Collections.singleton(exec.getNode("4"))); + Assert.assertFalse(linearBlockHoppingScanner.hasNext()); } @@ -493,25 +396,23 @@ public void testParallelScan() throws Exception { Collection heads = b.getExecution().getCurrentHeads(); AbstractFlowScanner scanner = new LinearScanner(); - Collection matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); - Assert.assertEquals(3, matches.size()); + scanner.setup(heads); + assertNodeOrder("Linear", scanner, 15, 14, 13, 9, 8, 6, 4, 3, 2); + scanner.setup(heads, Collections.singleton(exec.getNode("9"))); + assertNodeOrder("Linear", scanner, 15, 14, 13, 12, 11, 10, 7, 4, 3, 2); - scanner = new DepthFirstScanner(); - matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); - Assert.assertEquals(5, matches.size()); - // Block hopping scanner - scanner = new LinearBlockHoppingScanner(); - matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); - Assert.assertEquals(0, matches.size()); - - matches = scanner.filteredNodes(Collections.singleton(b.getExecution().getNode("14")), MATCH_ECHO_STEP); - Assert.assertEquals(2, matches.size()); + // Depth first scanner and with blacklist + scanner = new DepthFirstScanner(); + scanner.setup(heads); + assertNodeOrder("Depth 
first", scanner, 15, 14, 13, 9, 8, 6, 4, 3, 2, 12, 11, 10, 7); + scanner.setup(heads, Collections.singleton(exec.getNode("9"))); + assertNodeOrder("Linear", scanner, 15, 14, 13, 12, 11, 10, 7, 4, 3, 2); // We're going to test the ForkScanner in more depth since this is its natural use scanner = new ForkScanner(); - matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); - Assert.assertEquals(5, matches.size()); + scanner.setup(heads); + assertNodeOrder("ForkedScanner", scanner, 15, 14, 13, 9, 8, 6, 12, 11, 10, 7, 4, 3, 2); /*ArrayList forkedHeads = new ArrayList(); forkedHeads.add(exec.getNode("9")); @@ -640,6 +541,5 @@ public void testForkedScanner() throws Exception { Assert.assertEquals(exec.getNode("6"), scanner.next()); FlowNode f = scanner.next(); Assert.assertEquals(exec.getNode("12"), f); - } } \ No newline at end of file From 13ec682bb502f89a9ff1103ee135d44c7b5a7e3b Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 25 May 2016 13:31:05 -0400 Subject: [PATCH 040/104] Refactor forkscanner, create dedicated tests, fix splitting --- .../graphanalysis/AbstractFlowScanner.java | 2 +- .../workflow/graphanalysis/ForkScanner.java | 148 ++++++---- .../graphanalysis/FlowScannerTest.java | 162 ++--------- .../workflow/graphanalysis/FlowTestUtils.java | 108 +++++++ .../graphanalysis/ForkScannerTest.java | 263 ++++++++++++++++++ 5 files changed, 485 insertions(+), 198 deletions(-) create mode 100644 src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowTestUtils.java create mode 100644 src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java index 68d0fa47..6fda5e93 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java +++ 
b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java @@ -89,7 +89,7 @@ public abstract class AbstractFlowScanner implements Iterable , Filter protected Collection myBlackList = Collections.EMPTY_SET; /** When checking for blacklist membership, we convert to a hashset when checking more than this many elements */ - protected final int MAX_LIST_CHECK_SIZE = 5; + protected static final int MAX_LIST_CHECK_SIZE = 5; /** Helper: convert stop nodes to a collection that can efficiently be checked for membership, handling null if needed */ @Nonnull diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index 659b54f6..abbf509a 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -100,11 +100,11 @@ protected ParallelBlockStart(BlockStartNode forkStart, int branchCount) { ParallelBlockStart() {} } - protected interface FlowPiece { - // Marker interface for now + interface FlowPiece { + // Marker interface for now, so we don't just inherit from object } - protected static class FlowSegment implements FlowPiece { + static class FlowSegment implements FlowPiece { ArrayList visited = new ArrayList(); FlowPiece after; @@ -117,27 +117,33 @@ protected static class FlowSegment implements FlowPiece { * @return Fork where split occurred */ public Fork split(@Nonnull HashMap nodeMapping, @Nonnull BlockStartNode forkPoint, @Nonnull FlowPiece forkBranch) { - int index = visited.indexOf(forkPoint); + int index = visited.lastIndexOf(forkPoint); // Fork will be closer to end, so this is better than indexOf + Fork newFork = new Fork(forkPoint); if (index < 0) { throw new IllegalStateException("Tried to split a segment where the node doesn't exist in this segment"); - } + } else if (index == this.visited.size()-1) { // We forked 
just off the end + newFork.following.add(this); + newFork.following.add(forkBranch); + this.visited.remove(index); + } else { // Splitting at some midpoint within the segment, everything before becomes part of the following + // Execute the split: create a new fork at the fork point, and shuffle the part of the flow after it + // to a new segment and add that to the fork + FlowSegment newSegment = new FlowSegment(); + newSegment.after = this.after; + + if (index < visited.size()) { + newSegment.visited.addAll(this.visited.subList(0, index)); + } + newFork.following.add(newSegment); + newFork.following.add(forkBranch); + this.after = newFork; - // Execute the split: create a new fork at the fork point, and shuffle the part of the flow after it - // to a new segment and add that to the fork - Fork newFork = new Fork(forkPoint); - FlowSegment newSegment = new FlowSegment(); - newSegment.after = this.after; - if (visited.size() > index+1) { - newSegment.visited.addAll(index+1, visited); - } - newFork.following.add(forkBranch); - newFork.following.add(newSegment); - this.after = newFork; - - // Remove the nodes after the split, and remap the fork points - this.visited.subList(index,visited.size()-1).clear(); - for (FlowNode n : newSegment.visited) { - nodeMapping.put(n, newSegment); + // Remove the part before the fork point + + this.visited.subList(0, index+1).clear(); + for (FlowNode n : newSegment.visited) { + nodeMapping.put(n, newSegment); + } } nodeMapping.put(forkPoint, newFork); return newFork; @@ -148,7 +154,8 @@ public void add(FlowNode f) { } } - protected static class Fork extends ParallelBlockStart implements FlowPiece { + /** Internal class used for constructing the LeastCommonAncestor structure */ + static class Fork extends ParallelBlockStart implements FlowPiece { List following = new ArrayList(); public Fork(BlockStartNode forkNode) { @@ -162,9 +169,62 @@ public ParallelBlockStart toSimple() { } } - /** - * Constructs the tree mapping each flowNode to 
its nearest + /** Subcomponent of least-common-ancestor: check for merge of branches + * Pulled out to allow for unit testing, and to simplify logic. * + * Basically this looks to see if a branch intersects an existing one (where a node points to an existing FlowPiece) + * If they intersect, the branch is merged onto the existing one, splitting it and creating a fork if needed. + * Otherwise, it gets a new FlowNode added in + * @return true if the next node from myPiece merged with an existing branch, false if we just added another head + */ + boolean checkForMerge(final HashMap branches, FlowSegment myPiece, FlowNode nextHead, ArrayDeque parallelForks) { + FlowPiece existingBranch = branches.get(nextHead); + if (existingBranch != null) { // Joining into an existing branch + // Found a case where they converge, replace with a convergent branch + if (existingBranch instanceof Fork) { // Joining an existing fork with other branches + Fork f = (Fork) existingBranch; + f.following.add(myPiece); + } else { // We've hit a new fork, split the segment and add it to top the parallels (it's higher than previous ones) + parallelForks.add((((FlowSegment) existingBranch).split(branches, (BlockStartNode) nextHead, myPiece))); + } + return true; + } else { + myPiece.add(nextHead); + branches.put(nextHead, myPiece); + return false; + } + } + + /** Does a conversion of the fork container class to a set of block starts */ + ArrayDeque convertForksToBlockStarts(ArrayDeque parallelForks) { + // Walk through and convert forks to parallel block starts, and find heads that point to them + ArrayDeque output = new ArrayDeque(); + for (Fork f : parallelForks) { + // Do processing to assign heads to flowsegments + ParallelBlockStart start = new ParallelBlockStart(); + start.totalBranches = f.following.size(); + start.forkStart = f.forkStart; + start.remainingBranches = start.totalBranches; + + // Add the nodes to the parallel starts here + for (FlowPiece fp : f.following) { + // FIXME do 
something with the remainingCounts to ensure we don't hit issues with primitive branches + if (fp instanceof FlowSegment) { + FlowSegment fs = (FlowSegment)fp; + if (fs.after == null) { // Ends in a head, not a fork + start.unvisited = new ArrayDeque(); + start.unvisited.add(fs.visited.get(0)); + } + } + } + output.add(start); + } + return output; + } + + /** + * Given a set of nodes, walks back (jumping blocks) and constructing the hierarchy of branches + * This allows us to use ForkScanner when starting with heads that are parallel branches * @param heads */ ArrayDeque leastCommonAncestor(@Nonnull Set heads) { @@ -172,7 +232,7 @@ ArrayDeque leastCommonAncestor(@Nonnull Set heads) ArrayList> iterators = new ArrayList>(); ArrayList liveHeads = new ArrayList(); - ArrayDeque parallelForks = new ArrayDeque(); + ArrayDeque parallelForks = new ArrayDeque(); // Tracks the discovered forks in order of encounter for (FlowNode f : heads) { iterators.add(FlowScanningUtils.filterableEnclosingBlocks(f)); @@ -198,47 +258,15 @@ ArrayDeque leastCommonAncestor(@Nonnull Set heads) continue; } - FlowNode nextHead = blockStarts.next(); - FlowPiece existingBranch = branches.get(nextHead); - if (existingBranch != null) { // Joining into an existing branch - // Found a case where they converge, replace with a convergent branch - if (existingBranch instanceof Fork) { // Joining an existing fork with other branches - Fork f = (Fork) existingBranch; - f.following.add(myPiece); - } else { // We've hit a new fork, split the segment and add it to top the parallels (it's higher than previous ones) - parallelForks.add((((FlowSegment) existingBranch).split(branches, (BlockStartNode) nextHead, myPiece))); - } + boolean didMerge = checkForMerge(branches, myPiece, blockStarts.next(), parallelForks); + if (didMerge) { itIterator.remove(); pieceIterator.remove(); - } else { - myPiece.add(nextHead); - branches.put(nextHead, myPiece); } } } - // Walk through and convert forks to parallel block starts, 
and find heads that point to them - ArrayDeque output = new ArrayDeque(); - for (Fork f : parallelForks) { - // Do processing to assign heads to flowsegments - ParallelBlockStart start = new ParallelBlockStart(); - start.totalBranches = f.following.size(); - start.forkStart = f.forkStart; - start.remainingBranches = start.totalBranches; - - // Add the nodes to the parallel starts here - for (FlowPiece fp : f.following) { - if (fp instanceof FlowSegment) { - FlowSegment fs = (FlowSegment)fp; - if (fs.after == null) { // Ends in a head, not a fork - start.unvisited = new ArrayDeque(); - start.unvisited.add(fs.visited.get(0)); - } - } - } - output.add(start); - } - return output; + return convertForksToBlockStarts(parallelForks); } @Override diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java index 09a15e51..22741b02 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java @@ -27,12 +27,10 @@ import com.google.common.base.Predicate; import com.google.common.base.Predicates; import org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition; -import org.jenkinsci.plugins.workflow.cps.nodes.StepAtomNode; import org.jenkinsci.plugins.workflow.flow.FlowExecution; import org.jenkinsci.plugins.workflow.graph.FlowNode; import org.jenkinsci.plugins.workflow.job.WorkflowJob; import org.jenkinsci.plugins.workflow.job.WorkflowRun; -import org.jenkinsci.plugins.workflow.steps.StepDescriptor; import org.junit.Assert; import org.junit.ClassRule; import org.junit.Rule; @@ -40,7 +38,6 @@ import org.jvnet.hudson.test.BuildWatcher; import org.jvnet.hudson.test.JenkinsRule; -import javax.annotation.Nonnull; import java.util.AbstractSet; import java.util.ArrayList; import java.util.Arrays; @@ -50,8 +47,14 @@ import java.util.LinkedHashSet; import 
java.util.List; import java.util.NoSuchElementException; -import java.util.TreeSet; +// Slightly dirty but it removes a ton of FlowTestUtils.* class qualifiers +import static org.jenkinsci.plugins.workflow.graphanalysis.FlowTestUtils.*; + +/** + * Tests for all the core parts of graph analysis except the ForkScanner, internals which is complex enough to merit its own tests + * @author Sam Van Oort + */ public class FlowScannerTest { @ClassRule @@ -59,64 +62,11 @@ public class FlowScannerTest { @Rule public JenkinsRule r = new JenkinsRule(); - public static Predicate predicateMatchStepDescriptor(@Nonnull final String descriptorId) { - Predicate outputPredicate = new Predicate() { - @Override - public boolean apply(FlowNode input) { - if (input instanceof StepAtomNode) { - StepAtomNode san = (StepAtomNode)input; - StepDescriptor sd = san.getDescriptor(); - return sd != null && descriptorId.equals(sd.getId()); - } - return false; - } - }; - return outputPredicate; - } - - Predicate MATCH_ECHO_STEP = predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); - - static final class CollectingVisitor implements FlowNodeVisitor { - ArrayList visited = new ArrayList(); - - @Override - public boolean visit(@Nonnull FlowNode f) { - visited.add(f); - return true; - } - - public void reset() { - this.visited.clear(); - } - - public ArrayList getVisited() { - return visited; - } - }; - - /** Assert node ordering using their ids */ - public void assertNodeOrder(String description, Iterable nodes, String... nodeIds) { - ArrayList realIds = new ArrayList(); - for (FlowNode f: nodes) { - Assert.assertNotNull(f); - realIds.add(f.getId()); - } - Assert.assertArrayEquals(description, nodeIds, realIds.toArray()); - } - - /** Assert node ordering using iotas for their ids */ - public void assertNodeOrder(String description, Iterable nodes, int... 
nodeIds) { - String[] nodeIdStrings = new String[nodeIds.length]; - for (int i=0; i matchEchoStep = predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); + Predicate matchEchoStep = FlowTestUtils.predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); FlowExecution exec = b.getExecution(); Collection heads = exec.getCurrentHeads(); @@ -413,16 +363,23 @@ public void testParallelScan() throws Exception { scanner = new ForkScanner(); scanner.setup(heads); assertNodeOrder("ForkedScanner", scanner, 15, 14, 13, 9, 8, 6, 12, 11, 10, 7, 4, 3, 2); + scanner.setup(heads, Collections.singleton(exec.getNode("9"))); + assertNodeOrder("ForkedScanner", scanner, 15, 14, 13, 12, 11, 10, 7, 4, 3, 2); - /*ArrayList forkedHeads = new ArrayList(); - forkedHeads.add(exec.getNode("9")); - forkedHeads.add(exec.getNode("11")); - matches = scanner.filteredNodes(forkedHeads, null, MATCH_ECHO_STEP); - Assert.assertEquals(5, matches.size());*/ + // Test forkscanner midflow + scanner.setup(exec.getNode("14")); + assertNodeOrder("ForkedScanner", scanner, 14, 13, 9, 8, 6, 12, 11, 10, 7, 4, 3, 2); - // Start in one branch, test the forkscanning - Assert.assertEquals(3, scanner.filteredNodes(exec.getNode("12"), MATCH_ECHO_STEP).size()); - Assert.assertEquals(2, scanner.filteredNodes(exec.getNode("9"), MATCH_ECHO_STEP).size()); + // Test forkscanner inside a parallel + /* + List startingPoints = Arrays.asList(exec.getNode("9"), exec.getNode("12")); + scanner.setup(startingPoints); + assertNodeOrder("ForkedScanner", scanner, 9, 8, 6, 12, 11, 10, 7, 4, 3, 2); + + startingPoints = Arrays.asList(exec.getNode("9"), exec.getNode("11")); + scanner.setup(startingPoints); + assertNodeOrder("ForkedScanner", scanner, 9, 8, 6, 11, 10, 7, 4, 3, 2); + */ // Filtering at different points within branches List blackList = Arrays.asList(exec.getNode("6"), exec.getNode("7")); @@ -473,73 +430,4 @@ public void testNestedParallelScan() throws Exception { matches = 
scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); Assert.assertEquals(7, matches.size()); } - - /** Unit tests for the innards of the ForkScanner */ - @Test - public void testForkedScanner() throws Exception { - - WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "Convoluted"); - job.setDefinition(new CpsFlowDefinition( - "echo 'first'\n" + - "def steps = [:]\n" + - "steps['1'] = {\n" + - " echo 'do 1 stuff'\n" + - "}\n" + - "steps['2'] = {\n" + - " echo '2a'\n" + - " echo '2b'\n" + - "}\n" + - "parallel steps\n" + - "echo 'final'" - )); - - /** Flow structure (ID - type) - 2 - FlowStartNode (BlockStartNode) - 3 - Echostep - 4 - ParallelStep (StepStartNode) (start branches) - 6 - ParallelStep (StepStartNode) (start branch 1), ParallelLabelAction with branchname=1 - 7 - ParallelStep (StepStartNode) (start branch 2), ParallelLabelAction with branchname=2 - 8 - EchoStep, (branch 1) parent=6 - 9 - StepEndNode, (end branch 1) startId=6, parentId=8 - 10 - EchoStep, (branch 2) parentId=7 - 11 - EchoStep, (branch 2) parentId = 10 - 12 - StepEndNode (end branch 2) startId=7 parentId=11, - 13 - StepEndNode (close branches), parentIds = 9,12, startId=4 - 14 - EchoStep - 15 - FlowEndNode (BlockEndNode) - */ - - WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); - FlowExecution exec = b.getExecution(); - Collection heads = b.getExecution().getCurrentHeads(); - - // Initial case - ForkScanner scanner = new ForkScanner(); - scanner.setup(heads, null); - Assert.assertNull(scanner.currentParallelStart); - Assert.assertNull(scanner.currentParallelStartNode); - Assert.assertNotNull(scanner.parallelBlockStartStack); - Assert.assertEquals(0, scanner.parallelBlockStartStack.size()); - Assert.assertTrue(scanner.isWalkingFromFinish()); - - // Fork case - scanner.setup(exec.getNode("13")); - Assert.assertFalse(scanner.isWalkingFromFinish()); - Assert.assertEquals("13", scanner.next().getId()); - Assert.assertNotNull(scanner.parallelBlockStartStack); - 
Assert.assertEquals(0, scanner.parallelBlockStartStack.size()); - Assert.assertEquals(exec.getNode("4"), scanner.currentParallelStartNode); - - ForkScanner.ParallelBlockStart start = scanner.currentParallelStart; - Assert.assertEquals(2, start.totalBranches); - Assert.assertEquals(1, start.remainingBranches); - Assert.assertEquals(1, start.unvisited.size()); - Assert.assertEquals(exec.getNode("4"), start.forkStart); - - Assert.assertEquals(exec.getNode("9"), scanner.next()); - Assert.assertEquals(exec.getNode("8"), scanner.next()); - Assert.assertEquals(exec.getNode("6"), scanner.next()); - FlowNode f = scanner.next(); - Assert.assertEquals(exec.getNode("12"), f); - } } \ No newline at end of file diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowTestUtils.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowTestUtils.java new file mode 100644 index 00000000..6b4984d6 --- /dev/null +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowTestUtils.java @@ -0,0 +1,108 @@ +package org.jenkinsci.plugins.workflow.graphanalysis;/* + * The MIT License + * + * Copyright (c) 2016, CloudBees, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +import com.google.common.base.Predicate; +import org.jenkinsci.plugins.workflow.cps.nodes.StepAtomNode; +import org.jenkinsci.plugins.workflow.flow.FlowExecution; +import org.jenkinsci.plugins.workflow.graph.FlowNode; +import org.jenkinsci.plugins.workflow.steps.StepDescriptor; +import org.junit.Assert; + +import javax.annotation.Nonnull; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; + +/** + * Utilities for testing flow scanning + * @author Sam Van Oort + */ +public class FlowTestUtils { + public static Predicate predicateMatchStepDescriptor(@Nonnull final String descriptorId) { + Predicate outputPredicate = new Predicate() { + @Override + public boolean apply(FlowNode input) { + if (input instanceof StepAtomNode) { + StepAtomNode san = (StepAtomNode)input; + StepDescriptor sd = san.getDescriptor(); + return sd != null && descriptorId.equals(sd.getId()); + } + return false; + } + }; + return outputPredicate; + } + + public static final class CollectingVisitor implements FlowNodeVisitor { + ArrayList visited = new ArrayList(); + + @Override + public boolean visit(@Nonnull FlowNode f) { + visited.add(f); + return true; + } + + public void reset() { + this.visited.clear(); + } + + public ArrayList getVisited() { + return visited; + } + } + + public static Predicate MATCH_ECHO_STEP = FlowTestUtils.predicateMatchStepDescriptor("org.jenkinsci.plugins.workflow.steps.EchoStep"); + + /** Assert node ordering using their ids */ + public static void assertNodeOrder(String description, Iterable nodes, String... 
nodeIds) { + ArrayList realIds = new ArrayList(); + for (FlowNode f: nodes) { + Assert.assertNotNull(f); + realIds.add(f.getId()); + } + Assert.assertArrayEquals(description, nodeIds, realIds.toArray()); + } + + /** Assert node ordering using iotas for their ids */ + public static void assertNodeOrder(String description, Iterable nodes, int... nodeIds) { + String[] nodeIdStrings = new String[nodeIds.length]; + for (int i=0; i coll, FlowExecution exec, int... iotas) { + try { + for (int nodeId : iotas) { + coll.add(exec.getNode(Integer.toString(nodeId))); + } + } catch (IOException ioe) { + throw new IllegalStateException("Failed to load node by id", ioe); + } + + } +} diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java new file mode 100644 index 00000000..c3265020 --- /dev/null +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java @@ -0,0 +1,263 @@ +/* + * The MIT License + * + * Copyright (c) 2016, CloudBees, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +package org.jenkinsci.plugins.workflow.graphanalysis; + +import org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition; +import org.jenkinsci.plugins.workflow.flow.FlowExecution; +import org.jenkinsci.plugins.workflow.graph.BlockStartNode; +import org.jenkinsci.plugins.workflow.graph.FlowNode; +import org.jenkinsci.plugins.workflow.job.WorkflowJob; +import org.jenkinsci.plugins.workflow.job.WorkflowRun; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.jvnet.hudson.test.BuildWatcher; +import org.jvnet.hudson.test.JenkinsRule; +import org.junit.Assert; + +import java.util.Collection; +import java.util.HashMap; + +// Slightly dirty but it removes a ton of FlowTestUtils.* class qualifiers +import static org.jenkinsci.plugins.workflow.graphanalysis.FlowTestUtils.*; + +/** + * Tests for internals of ForkScanner + */ +public class ForkScannerTest { + @ClassRule + public static BuildWatcher buildWatcher = new BuildWatcher(); + + @Rule + public JenkinsRule r = new JenkinsRule(); + + /** Flow structure (ID - type) + 2 - FlowStartNode (BlockStartNode) + 3 - Echostep + 4 - ParallelStep (StepStartNode) (start branches) + 6 - ParallelStep (StepStartNode) (start branch 1), ParallelLabelAction with branchname=1 + 7 - ParallelStep (StepStartNode) (start branch 2), ParallelLabelAction with branchname=2 + 8 - EchoStep, (branch 1) parent=6 + 9 - StepEndNode, (end branch 1) startId=6, parentId=8 + 10 - EchoStep, (branch 2) parentId=7 + 11 - EchoStep, (branch 2) parentId = 10 + 12 - StepEndNode (end branch 2) startId=7 parentId=11, + 13 - StepEndNode (close branches), parentIds = 9,12, startId=4 + 14 - EchoStep + 15 - FlowEndNode (BlockEndNode) + 
*/ + WorkflowRun SIMPLE_PARALLEL_RUN; + WorkflowRun NESTED_PARALLEL_RUN; + + @Before + public void setUp() throws Exception { + r.jenkins.getInjector().injectMembers(this); + + WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "SimpleParallel"); + job.setDefinition(new CpsFlowDefinition( + "echo 'first'\n" + + "def steps = [:]\n" + + "steps['1'] = {\n" + + " echo 'do 1 stuff'\n" + + "}\n" + + "steps['2'] = {\n" + + " echo '2a'\n" + + " echo '2b'\n" + + "}\n" + + "parallel steps\n" + + "echo 'final'" + )); + WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); + this.SIMPLE_PARALLEL_RUN = b; + + job = r.jenkins.createProject(WorkflowJob.class, "NestedParallel"); + job.setDefinition(new CpsFlowDefinition( + "echo 'first'\n" + + "def steps = [:]\n" + + "steps['1'] = {\n" + + " echo 'do 1 stuff'\n" + + "}\n" + + "steps['2'] = {\n" + + " echo '2a'\n" + + " def nested = [:]\n" + + " nested['2-1'] = {\n" + + " echo 'do 2-1'\n" + + " } \n" + + " nested['2-2'] = {\n" + + " sleep 1\n" + + " echo '2 section 2'\n" + + " }\n" + + " echo '2b'\n" + + " parallel nested\n" + + "}\n" + + "parallel steps\n" + + "echo 'final'" + )); + b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); + this.NESTED_PARALLEL_RUN = b; + } + + @Test + public void testForkedScanner() throws Exception { + FlowExecution exec = SIMPLE_PARALLEL_RUN.getExecution(); + Collection heads = SIMPLE_PARALLEL_RUN.getExecution().getCurrentHeads(); + + // Initial case + ForkScanner scanner = new ForkScanner(); + scanner.setup(heads, null); + Assert.assertNull(scanner.currentParallelStart); + Assert.assertNull(scanner.currentParallelStartNode); + Assert.assertNotNull(scanner.parallelBlockStartStack); + Assert.assertEquals(0, scanner.parallelBlockStartStack.size()); + Assert.assertTrue(scanner.isWalkingFromFinish()); + + // Fork case + scanner.setup(exec.getNode("13")); + Assert.assertFalse(scanner.isWalkingFromFinish()); + Assert.assertEquals("13", scanner.next().getId()); + 
Assert.assertNotNull(scanner.parallelBlockStartStack); + Assert.assertEquals(0, scanner.parallelBlockStartStack.size()); + Assert.assertEquals(exec.getNode("4"), scanner.currentParallelStartNode); + + ForkScanner.ParallelBlockStart start = scanner.currentParallelStart; + Assert.assertEquals(2, start.totalBranches); + Assert.assertEquals(1, start.remainingBranches); + Assert.assertEquals(1, start.unvisited.size()); + Assert.assertEquals(exec.getNode("4"), start.forkStart); + + Assert.assertEquals(exec.getNode("9"), scanner.next()); + Assert.assertEquals(exec.getNode("8"), scanner.next()); + Assert.assertEquals(exec.getNode("6"), scanner.next()); + FlowNode f = scanner.next(); + Assert.assertEquals(exec.getNode("12"), f); + + // Now we test the least common ancestor bits + } + + @Test + public void testFlowSegmentSplit() throws Exception { + FlowExecution exec = SIMPLE_PARALLEL_RUN.getExecution(); + + /** Flow structure (ID - type) + 2 - FlowStartNode (BlockStartNode) + 3 - Echostep + 4 - ParallelStep (StepStartNode) (start branches) + 6 - ParallelStep (StepStartNode) (start branch 1), ParallelLabelAction with branchname=1 + 7 - ParallelStep (StepStartNode) (start branch 2), ParallelLabelAction with branchname=2 + 8 - EchoStep, (branch 1) parent=6 + 9 - StepEndNode, (end branch 1) startId=6, parentId=8 + 10 - EchoStep, (branch 2) parentId=7 + 11 - EchoStep, (branch 2) parentId = 10 + 12 - StepEndNode (end branch 2) startId=7 parentId=11, + 13 - StepEndNode (close branches), parentIds = 9,12, startId=4 + 14 - EchoStep + 15 - FlowEndNode (BlockEndNode) + */ + + HashMap nodeMap = new HashMap(); + ForkScanner.FlowSegment mainBranch = new ForkScanner.FlowSegment(); + ForkScanner.FlowSegment sideBranch = new ForkScanner.FlowSegment(); + FlowNode BRANCH1_END = exec.getNode("9"); + FlowNode BRANCH2_END = exec.getNode("12"); + FlowNode START_PARALLEL = exec.getNode("4"); + + // Branch 1, we're going to run one flownode beyond the start of the parallel branch and then split + 
mainBranch.add(BRANCH1_END); + mainBranch.add(exec.getNode("8")); + mainBranch.add(exec.getNode("6")); + mainBranch.add(exec.getNode("4")); + mainBranch.add(exec.getNode("3")); // FlowNode beyond the fork point + for (FlowNode f : mainBranch.visited) { + nodeMap.put(f, mainBranch); + } + assertNodeOrder("Visited nodes", mainBranch.visited, 9, 8, 6, 4, 3); + + // Branch 2 + sideBranch.add(BRANCH2_END); + sideBranch.add(exec.getNode("11")); + sideBranch.add(exec.getNode("10")); + sideBranch.add(exec.getNode("7")); + for (FlowNode f : sideBranch.visited) { + nodeMap.put(f, sideBranch); + } + assertNodeOrder("Visited nodes", sideBranch.visited, 12, 11, 10, 7); + + ForkScanner.Fork forked = mainBranch.split(nodeMap, (BlockStartNode)exec.getNode("4"), sideBranch); + ForkScanner.FlowSegment splitSegment = (ForkScanner.FlowSegment)nodeMap.get(BRANCH1_END); // New branch + Assert.assertNull(splitSegment.after); + assertNodeOrder("Branch 1 split after fork", splitSegment.visited, 9, 8, 6); + + // Just the single node before the fork + Assert.assertEquals(forked, mainBranch.after); + assertNodeOrder("Head of flow, pre-fork", mainBranch.visited, 3); + + // Fork point + Assert.assertEquals(forked, nodeMap.get(START_PARALLEL)); + ForkScanner.FlowPiece[] follows = {splitSegment, sideBranch}; + Assert.assertArrayEquals(follows, forked.following.toArray()); + + // Branch 2 + Assert.assertEquals(sideBranch, nodeMap.get(BRANCH2_END)); + assertNodeOrder("Branch 2", sideBranch.visited, 12, 11, 10, 7); + + // Test me where splitting right at a fork point, where we should have a fork with and main branch shoudl become following + // Along with side branch (branch2) + nodeMap.clear(); + mainBranch = new ForkScanner.FlowSegment(); + sideBranch = new ForkScanner.FlowSegment(); + mainBranch.visited.add(exec.getNode("6")); + mainBranch.visited.add(START_PARALLEL); + sideBranch.visited.add(exec.getNode("7")); + for (FlowNode f : mainBranch.visited) { + nodeMap.put(f, mainBranch); + } + 
nodeMap.put(exec.getNode("7"), sideBranch); + + forked = mainBranch.split(nodeMap, (BlockStartNode)exec.getNode("4"), sideBranch); + follows = new ForkScanner.FlowSegment[2]; + follows[0] = mainBranch; + follows[1] = sideBranch; + Assert.assertArrayEquals(follows, forked.following.toArray()); + assertNodeOrder("Branch1", mainBranch.visited, 6); + Assert.assertNull(mainBranch.after); + assertNodeOrder("Branch2", sideBranch.visited, 7); + Assert.assertNull(sideBranch.after); + Assert.assertEquals(forked, nodeMap.get(START_PARALLEL)); + Assert.assertEquals(mainBranch, nodeMap.get(exec.getNode("6"))); + Assert.assertEquals(sideBranch, nodeMap.get(exec.getNode("7"))); + } + + @Test + public void testBranchMerge() throws Exception { + FlowExecution exec = SIMPLE_PARALLEL_RUN.getExecution(); + + } + + @Test + public void testParallelBranchCreation() throws Exception { + + } +} From 4ee5cec21af6f8c4ab75f6fff93079fe41640d81 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 25 May 2016 18:42:56 -0400 Subject: [PATCH 041/104] WIP for ForkScanner - tests and 1 fix for leastCommonAncestor --- .../graphanalysis/FilteratorImpl.java | 2 +- .../workflow/graphanalysis/ForkScanner.java | 24 ++----- .../graphanalysis/FlowScannerTest.java | 34 ++++++++++ .../graphanalysis/ForkScannerTest.java | 66 +++++++++++++++++-- 4 files changed, 104 insertions(+), 22 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java index 9294ac27..6ba13c6c 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java @@ -32,7 +32,7 @@ /** Filters an iterator against a match predicate by wrapping an iterator * @author Sam Van Oort */ -public class FilteratorImpl implements Filterator { +class FilteratorImpl implements Filterator { private boolean hasNext = false; 
private T nextVal; private Iterator wrapped; diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index abbf509a..26888365 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -34,9 +34,8 @@ import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.List; import java.util.ListIterator; import java.util.Set; @@ -109,10 +108,10 @@ static class FlowSegment implements FlowPiece { FlowPiece after; /** - * We have discovered a forking node intersecting our FlowSegment in the middle - * Now we need to split the flow + * We have discovered a forking node intersecting our FlowSegment in the middle or meeting at the end + * Now we need to split the flow, or pull out the fork point and make both branches follow it * @param nodeMapping Mapping of BlockStartNodes to flowpieces (forks or segments) - * @param forkPoint Node where the flows intersec + * @param forkPoint Node where the branches intersect/meet * @param forkBranch Flow piece that is joining this * @return Fork where split occurred */ @@ -130,10 +129,7 @@ public Fork split(@Nonnull HashMap nodeMapping, @Nonnull Bl // to a new segment and add that to the fork FlowSegment newSegment = new FlowSegment(); newSegment.after = this.after; - - if (index < visited.size()) { - newSegment.visited.addAll(this.visited.subList(0, index)); - } + newSegment.visited.addAll(this.visited.subList(0, index)); newFork.following.add(newSegment); newFork.following.add(forkBranch); this.after = newFork; @@ -161,12 +157,6 @@ static class Fork extends ParallelBlockStart implements FlowPiece { public Fork(BlockStartNode forkNode) { this.forkStart = 
forkNode; } - - public ParallelBlockStart toSimple() { - ParallelBlockStart st = new ParallelBlockStart(); - - return st; - } } /** Subcomponent of least-common-ancestor: check for merge of branches @@ -205,6 +195,7 @@ ArrayDeque convertForksToBlockStarts(ArrayDeque parall start.totalBranches = f.following.size(); start.forkStart = f.forkStart; start.remainingBranches = start.totalBranches; + start.unvisited = new ArrayDeque(); // Add the nodes to the parallel starts here for (FlowPiece fp : f.following) { @@ -212,7 +203,6 @@ ArrayDeque convertForksToBlockStarts(ArrayDeque parall if (fp instanceof FlowSegment) { FlowSegment fs = (FlowSegment)fp; if (fs.after == null) { // Ends in a head, not a fork - start.unvisited = new ArrayDeque(); start.unvisited.add(fs.visited.get(0)); } } @@ -273,7 +263,7 @@ ArrayDeque leastCommonAncestor(@Nonnull Set heads) protected void setHeads(@Nonnull Collection heads) { if (heads.size() > 1) { //throw new IllegalArgumentException("ForkedFlowScanner can't handle multiple head nodes yet"); - leastCommonAncestor(new HashSet(heads)); + leastCommonAncestor(new LinkedHashSet(heads)); walkingFromFinish = false; } else { FlowNode f = heads.iterator().next(); diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java index 22741b02..840b7959 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java @@ -359,6 +359,9 @@ public void testParallelScan() throws Exception { scanner.setup(heads, Collections.singleton(exec.getNode("9"))); assertNodeOrder("Linear", scanner, 15, 14, 13, 12, 11, 10, 7, 4, 3, 2); + scanner.setup(Arrays.asList(exec.getNode("9"), exec.getNode("12"))); + assertNodeOrder("Depth-first scanner from inside parallels", scanner, 9, 8, 6, 4, 3, 2, 12, 11, 10, 7); + // We're going to test the 
ForkScanner in more depth since this is its natural use scanner = new ForkScanner(); scanner.setup(heads); @@ -415,12 +418,43 @@ public void testNestedParallelScan() throws Exception { "echo 'final'" )); + /** Parallel nested in parallel (ID-type) + * 2 - FlowStartNode (BlockStartNode) + * 3 - Echostep + * 4 - ParallelStep (stepstartnode) + * 6 - ParallelStep (StepStartNode) (start branch 1), ParallelLabelAction with branchname=1 + * 7 - ParallelStep (StepStartNode) (start branch 2), ParallelLabelAction with branchname=2 + * 8 - EchoStep (branch #1) - parentId=6 + * 9 - StepEndNode (end branch #1) - startId=6 + * 10 - EchoStep - parentId=7 + * 11 - EchoStep + * 12 - ParallelStep (StepStartNode) - start inner parallel + * 14 - ParallelStep (StepStartNode) (start branch 2-1), parentId=12, ParallelLabellAction with branchName=2-1 + * 15 - ParallelStep (StepStartNode) (start branch 2-2), parentId=12, ParallelLabelAction with branchName=2-2 + * 16 - Echo (Branch2-1), parentId=14 + * 17 - StepEndNode (end branch 2-1), parentId=16, startId=14 + * 18 - SleepStep (branch 2-2) parentId=15 + * 19 - EchoStep (branch 2-2) + * 20 - StepEndNode (end branch 2-2), startId=15 + * 21 - StepEndNode (end inner parallel), parentIds=17,20, startId=12 + * 22 - StepEndNode (end parallel #2), parent=21, startId=7 + * 23 - StepEndNode (end outer parallel), parentIds=9,22, startId=4 + * 24 - Echo + * 25 - FlowEndNode + */ + WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); FlowExecution exec = b.getExecution(); Collection heads = b.getExecution().getCurrentHeads(); // Basic test of DepthFirstScanner AbstractFlowScanner scanner = new DepthFirstScanner(); + scanner.setup(heads); + /*assertNodeOrder("Depth first with recursion", scanner, 25, 24, 23, + 9, 8, 6, 4, 3, 2, //Branch 1 + 22,17,16,14, //Branch 2-1 + + );*/ Collection matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); Assert.assertEquals(7, matches.size()); diff --git 
a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java index c3265020..84d57384 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java @@ -38,8 +38,15 @@ import org.jvnet.hudson.test.JenkinsRule; import org.junit.Assert; +import java.lang.reflect.Array; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashSet; +import java.util.Set; // Slightly dirty but it removes a ton of FlowTestUtils.* class qualifiers import static org.jenkinsci.plugins.workflow.graphanalysis.FlowTestUtils.*; @@ -70,6 +77,31 @@ public class ForkScannerTest { 15 - FlowEndNode (BlockEndNode) */ WorkflowRun SIMPLE_PARALLEL_RUN; + + /** Parallel nested in parallel (ID-type) + * 2 - FlowStartNode (BlockStartNode) + * 3 - Echostep + * 4 - ParallelStep (stepstartnode) + * 6 - ParallelStep (StepStartNode) (start branch 1), ParallelLabelAction with branchname=1 + * 7 - ParallelStep (StepStartNode) (start branch 2), ParallelLabelAction with branchname=2 + * 8 - EchoStep (branch #1) - parentId=6 + * 9 - StepEndNode (end branch #1) - startId=6 + * 10 - EchoStep - parentId=7 + * 11 - EchoStep + * 12 - ParallelStep (StepStartNode) - start inner parallel + * 14 - ParallelStep (StepStartNode) (start branch 2-1), parentId=12, ParallelLabellAction with branchName=2-1 + * 15 - ParallelStep (StepStartNode) (start branch 2-2), parentId=12, ParallelLabelAction with branchName=2-2 + * 16 - Echo (Branch2-1), parentId=14 + * 17 - StepEndNode (end branch 2-1), parentId=16, startId=14 + * 18 - SleepStep (branch 2-2) parentId=15 + * 19 - EchoStep (branch 2-2) + * 20 - StepEndNode (end branch 2-2), startId=15 + * 21 - StepEndNode (end inner parallel ), 
parentIds=17,20, startId=12 + * 22 - StepEndNode (end parallel #2), parent=21, startId=7 + * 23 - StepEndNode (end outer parallel), parentIds=9,22, startId=4 + * 24 - Echo + * 25 - FlowEndNode + */ WorkflowRun NESTED_PARALLEL_RUN; @Before @@ -251,13 +283,39 @@ public void testFlowSegmentSplit() throws Exception { } @Test - public void testBranchMerge() throws Exception { + public void testLeastCommonAncestor() throws Exception { FlowExecution exec = SIMPLE_PARALLEL_RUN.getExecution(); - } + ForkScanner scan = new ForkScanner(); + // Starts at the ends of the parallel branches + Set heads = new LinkedHashSet(Arrays.asList(exec.getNode("12"), exec.getNode("9"))); + ArrayDeque starts = scan.leastCommonAncestor(heads); + Assert.assertEquals(1, starts.size()); - @Test - public void testParallelBranchCreation() throws Exception { + ForkScanner.ParallelBlockStart start = starts.peek(); + Assert.assertEquals(2, start.totalBranches); + Assert.assertEquals(2, start.unvisited.size()); + Assert.assertEquals(2, start.remainingBranches); + Assert.assertEquals(exec.getNode("4"), start.forkStart); + Assert.assertArrayEquals(heads.toArray(), start.unvisited.toArray()); + + /** Now we do the same with nested run */ + exec = NESTED_PARALLEL_RUN.getExecution(); + heads = new LinkedHashSet(Arrays.asList(exec.getNode("9"), exec.getNode("17"), exec.getNode("20"))); + starts = scan.leastCommonAncestor(heads); + Assert.assertEquals(2, starts.size()); + ForkScanner.ParallelBlockStart inner = starts.getFirst(); + ForkScanner.ParallelBlockStart outer = starts.getLast(); + + Assert.assertEquals(2, inner.remainingBranches); + Assert.assertEquals(2, inner.totalBranches); + Assert.assertEquals(2, inner.unvisited.size()); + Assert.assertEquals(exec.getNode("12"), inner.forkStart); + Assert.assertEquals(2, outer.remainingBranches); + Assert.assertEquals(2, outer.totalBranches); + Assert.assertEquals(1, outer.unvisited.size()); + Assert.assertEquals(exec.getNode("9"), outer.unvisited.peek()); + 
Assert.assertEquals(exec.getNode("4"), outer.forkStart); } } From 6e28d68cf0aac5d5bfccd9ff678f75c906cd2b5d Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Mon, 30 May 2016 23:59:26 -0400 Subject: [PATCH 042/104] Fix the remaining issues with LeastCommonAncestor core & add comments --- .../graphanalysis/AbstractFlowScanner.java | 5 +- .../graphanalysis/DepthFirstScanner.java | 2 +- .../workflow/graphanalysis/ForkScanner.java | 128 ++++++++++-------- .../graphanalysis/FlowScannerTest.java | 6 - .../graphanalysis/ForkScannerTest.java | 5 + 5 files changed, 82 insertions(+), 64 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java index 6fda5e93..cc4eb60c 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java @@ -54,13 +54,14 @@ * - {@link Filterator}: If initialized as an Iterator, each FlowScanner can provide a filtered view from the current point in time. * - Iterable: for syntactic sugar, FlowScanners implement Iterable to allow use in for-each loops once initialized. * - * All APIs visit the parent nodes, walking backward from heads(inclusive) until they they hit {@link #blackList} nodes (exclusive) or reach the end of the DAG. + * All APIs visit the parent nodes, walking backward from heads(inclusive) until they they hit {@link #myBlackList} nodes (exclusive) or reach the end of the DAG. * If blackList nodes are an empty collection or null, APIs will walk to the beginning of the FlowGraph. * Multiple blackList nodes are helpful for putting separate bounds on walking different parallel branches. * * Key Points: * - There are many helper methods offering syntactic sugar for the above APIs in common use cases (simpler method signatures). 
- * - Each implementation provides its own iteration order (described in its javadoc comments). + * - Each implementation provides its own iteration order (described in its javadoc comments), + * but it is generally unsafe to rely on parallel branches being visited in a specific order. * - Implementations may visit some or all points in the DAG, this should be called out in the class's javadoc comments * - FlowScanners are NOT thread safe, for performance reasons and because it is too hard to guarantee. * - Many fields and methods are protected: this is intentional to allow building upon the implementations for more complex analyses. diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java index 9d6c272e..3794deed 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java @@ -37,7 +37,7 @@ /** Does a simple and somewhat efficient depth-first search of all FlowNodes in the DAG. * * Iteration order: depth-first search, revisiting parallel branches once done. - * With parallel branches, parents are visited in the order encountered. + * With parallel branches, the first branch is explored, then remaining branches are explored in reverse order. * * The behavior is analogous to {@link org.jenkinsci.plugins.workflow.graph.FlowGraphWalker} but faster. 
* @author Sam Van Oort diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index 26888365..ab5f94d4 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -24,6 +24,7 @@ package org.jenkinsci.plugins.workflow.graphanalysis; +import com.sun.tools.javac.comp.Flow; import org.jenkinsci.plugins.workflow.graph.BlockEndNode; import org.jenkinsci.plugins.workflow.graph.BlockStartNode; import org.jenkinsci.plugins.workflow.graph.FlowEndNode; @@ -83,12 +84,12 @@ public boolean isWalkingFromFinish() { return walkingFromFinish; } - /** Tracks state for parallel blocks */ + /** Tracks state for parallel blocks, so we can ensure all are visited and know the heads */ protected static class ParallelBlockStart { protected BlockStartNode forkStart; // This is the node with child branches protected int remainingBranches; protected int totalBranches; - protected ArrayDeque unvisited; // Remaining branches of this that we have have not visited yet + protected ArrayDeque unvisited = new ArrayDeque(); // Remaining branches of this that we have have not visited yet protected ParallelBlockStart(BlockStartNode forkStart, int branchCount) { this.forkStart = forkStart; @@ -99,49 +100,62 @@ protected ParallelBlockStart(BlockStartNode forkStart, int branchCount) { ParallelBlockStart() {} } - interface FlowPiece { - // Marker interface for now, so we don't just inherit from object + interface FlowPiece { // Mostly a marker + /** If true we are a leaf, I.E. 
no forks follow this */ + boolean isLeaf(); } + /** Linear (no parallels) run of FLowNodes */ static class FlowSegment implements FlowPiece { ArrayList visited = new ArrayList(); FlowPiece after; + boolean isLeaf = true; + + @Override + public boolean isLeaf() { + return isLeaf; + } /** * We have discovered a forking node intersecting our FlowSegment in the middle or meeting at the end * Now we need to split the flow, or pull out the fork point and make both branches follow it * @param nodeMapping Mapping of BlockStartNodes to flowpieces (forks or segments) - * @param forkPoint Node where the branches intersect/meet - * @param forkBranch Flow piece that is joining this - * @return Fork where split occurred + * @param joinPoint Node where the branches intersect/meet (fork point) + * @param joiningBranch Flow piece that is joining this + * @return Recreated fork */ - public Fork split(@Nonnull HashMap nodeMapping, @Nonnull BlockStartNode forkPoint, @Nonnull FlowPiece forkBranch) { - int index = visited.lastIndexOf(forkPoint); // Fork will be closer to end, so this is better than indexOf - Fork newFork = new Fork(forkPoint); + Fork split(@Nonnull HashMap nodeMapping, @Nonnull BlockStartNode joinPoint, @Nonnull FlowPiece joiningBranch) { + int index = visited.lastIndexOf(joinPoint); // Fork will be closer to end, so this is better than indexOf + Fork newFork = new Fork(joinPoint); + + // FIXME fork following fork with no segment! --- 30 May fixed, I think? 
if (index < 0) { throw new IllegalStateException("Tried to split a segment where the node doesn't exist in this segment"); - } else if (index == this.visited.size()-1) { // We forked just off the end + } else if (index == this.visited.size()-1) { // We forked just off the most recent node newFork.following.add(this); - newFork.following.add(forkBranch); + newFork.following.add(joiningBranch); this.visited.remove(index); + } else if (index == 0) { + throw new IllegalStateException("We have a cyclic graph somehow!?"); } else { // Splitting at some midpoint within the segment, everything before becomes part of the following // Execute the split: create a new fork at the fork point, and shuffle the part of the flow after it - // to a new segment and add that to the fork + // to a new segment and add that to the fork. + FlowSegment newSegment = new FlowSegment(); newSegment.after = this.after; newSegment.visited.addAll(this.visited.subList(0, index)); newFork.following.add(newSegment); - newFork.following.add(forkBranch); + newFork.following.add(joiningBranch); this.after = newFork; + this.isLeaf = false; // Remove the part before the fork point - this.visited.subList(0, index+1).clear(); for (FlowNode n : newSegment.visited) { nodeMapping.put(n, newSegment); } } - nodeMapping.put(forkPoint, newFork); + nodeMapping.put(joinPoint, newFork); return newFork; } @@ -154,34 +168,13 @@ public void add(FlowNode f) { static class Fork extends ParallelBlockStart implements FlowPiece { List following = new ArrayList(); - public Fork(BlockStartNode forkNode) { - this.forkStart = forkNode; + @Override + public boolean isLeaf() { + return false; } - } - /** Subcomponent of least-common-ancestor: check for merge of branches - * Pulled out to allow for unit testing, and to simplify logic. 
- * - * Basically this looks to see if a branch intersects an existing one (where a node points to an existing FlowPiece) - * If they intersect, the branch is merged onto the existing one, splitting it and creating a fork if needed. - * Otherwise, it gets a new FlowNode added in - * @return true if the next node from myPiece merged with an existing branch, false if we just added another head - */ - boolean checkForMerge(final HashMap branches, FlowSegment myPiece, FlowNode nextHead, ArrayDeque parallelForks) { - FlowPiece existingBranch = branches.get(nextHead); - if (existingBranch != null) { // Joining into an existing branch - // Found a case where they converge, replace with a convergent branch - if (existingBranch instanceof Fork) { // Joining an existing fork with other branches - Fork f = (Fork) existingBranch; - f.following.add(myPiece); - } else { // We've hit a new fork, split the segment and add it to top the parallels (it's higher than previous ones) - parallelForks.add((((FlowSegment) existingBranch).split(branches, (BlockStartNode) nextHead, myPiece))); - } - return true; - } else { - myPiece.add(nextHead); - branches.put(nextHead, myPiece); - return false; + public Fork(BlockStartNode forkNode) { + this.forkStart = forkNode; } } @@ -200,11 +193,8 @@ ArrayDeque convertForksToBlockStarts(ArrayDeque parall // Add the nodes to the parallel starts here for (FlowPiece fp : f.following) { // FIXME do something with the remainingCounts to ensure we don't hit issues with primitive branches - if (fp instanceof FlowSegment) { - FlowSegment fs = (FlowSegment)fp; - if (fs.after == null) { // Ends in a head, not a fork - start.unvisited.add(fs.visited.get(0)); - } + if (fp.isLeaf()) { // Forks are never leaves + start.unvisited.add(((FlowSegment)fp).visited.get(0)); } } output.add(start); @@ -214,13 +204,14 @@ ArrayDeque convertForksToBlockStarts(ArrayDeque parall /** * Given a set of nodes, walks back (jumping blocks) and constructing the hierarchy of branches + 
* Back to the least common ancestor (where parallel branches diverge) * This allows us to use ForkScanner when starting with heads that are parallel branches * @param heads */ ArrayDeque leastCommonAncestor(@Nonnull Set heads) { HashMap branches = new HashMap(); ArrayList> iterators = new ArrayList>(); - ArrayList liveHeads = new ArrayList(); + ArrayList liveHeads = new ArrayList(); ArrayDeque parallelForks = new ArrayDeque(); // Tracks the discovered forks in order of encounter @@ -235,21 +226,48 @@ ArrayDeque leastCommonAncestor(@Nonnull Set heads) // Walk through, merging flownodes one-by-one until everything has merged to one ancestor while (iterators.size() > 1) { ListIterator> itIterator = iterators.listIterator(); - ListIterator pieceIterator = liveHeads.listIterator(); + ListIterator pieceIterator = liveHeads.listIterator(); while (itIterator.hasNext()) { - Filterator blockStarts = itIterator.next(); - FlowSegment myPiece = pieceIterator.next(); + Filterator blockStartIterator = itIterator.next(); + FlowPiece myPiece = pieceIterator.next(); // Welp we hit the end of a branch - if (!blockStarts.hasNext()) { + if (!blockStartIterator.hasNext()) { pieceIterator.remove(); itIterator.remove(); continue; } - boolean didMerge = checkForMerge(branches, myPiece, blockStarts.next(), parallelForks); - if (didMerge) { + FlowNode nextBlockStart = blockStartIterator.next(); + + // THIS WAS ALL IN THE CHECKFORMERGE function call + FlowPiece existingPiece = branches.get(nextBlockStart); + if (existingPiece == null && myPiece instanceof Fork) { // Start a segment preceding the fork + FlowSegment newSegment = new FlowSegment(); + newSegment.isLeaf = false; + newSegment.add(nextBlockStart); + newSegment.after = myPiece; + pieceIterator.remove(); + pieceIterator.add(newSegment); + branches.put(nextBlockStart, newSegment); + } else if (existingPiece == null && myPiece instanceof FlowSegment) { // Add to segment + ((FlowSegment) myPiece).add(nextBlockStart); + 
branches.put(nextBlockStart, myPiece); + } else { // We're merging into another thing, remove this entry, we're done. DONE! + if (existingPiece instanceof Fork) { + ((Fork) existingPiece).following.add(myPiece); + } else { // Split a flow segment so it forks against this one + Fork f = ((FlowSegment) existingPiece).split(branches, (BlockStartNode)nextBlockStart, myPiece); + // If we split the existing segment at its end, we created a fork replacing its latest node + // Thus we must replace the piece with the fork ahead of it + if (f.following.contains(existingPiece) ) { + int headIndex = liveHeads.indexOf(existingPiece); + liveHeads.set(headIndex, f); + } + parallelForks.add(f); + } + itIterator.remove(); pieceIterator.remove(); } @@ -264,7 +282,7 @@ protected void setHeads(@Nonnull Collection heads) { if (heads.size() > 1) { //throw new IllegalArgumentException("ForkedFlowScanner can't handle multiple head nodes yet"); leastCommonAncestor(new LinkedHashSet(heads)); - walkingFromFinish = false; + walkingFromFinish = false;// FIXME do something with the remainingCounts to ensure we don't hit issues with primitive branches } else { FlowNode f = heads.iterator().next(); walkingFromFinish = f instanceof FlowEndNode; diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java index 840b7959..e94c341a 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java @@ -449,12 +449,6 @@ public void testNestedParallelScan() throws Exception { // Basic test of DepthFirstScanner AbstractFlowScanner scanner = new DepthFirstScanner(); - scanner.setup(heads); - /*assertNodeOrder("Depth first with recursion", scanner, 25, 24, 23, - 9, 8, 6, 4, 3, 2, //Branch 1 - 22,17,16,14, //Branch 2-1 - - );*/ Collection matches = scanner.filteredNodes(heads, null, 
MATCH_ECHO_STEP); Assert.assertEquals(7, matches.size()); diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java index 84d57384..e3af46d2 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java @@ -189,6 +189,7 @@ public void testForkedScanner() throws Exception { // Now we test the least common ancestor bits } + /** Reference the flow graphs in {@link #SIMPLE_PARALLEL_RUN} and {@link #NESTED_PARALLEL_RUN} */ @Test public void testFlowSegmentSplit() throws Exception { FlowExecution exec = SIMPLE_PARALLEL_RUN.getExecution(); @@ -282,10 +283,12 @@ public void testFlowSegmentSplit() throws Exception { Assert.assertEquals(sideBranch, nodeMap.get(exec.getNode("7"))); } + /** Reference the flow graphs in {@link #SIMPLE_PARALLEL_RUN} and {@link #NESTED_PARALLEL_RUN} */ @Test public void testLeastCommonAncestor() throws Exception { FlowExecution exec = SIMPLE_PARALLEL_RUN.getExecution(); + ForkScanner scan = new ForkScanner(); // Starts at the ends of the parallel branches Set heads = new LinkedHashSet(Arrays.asList(exec.getNode("12"), exec.getNode("9"))); @@ -302,6 +305,8 @@ public void testLeastCommonAncestor() throws Exception { /** Now we do the same with nested run */ exec = NESTED_PARALLEL_RUN.getExecution(); heads = new LinkedHashSet(Arrays.asList(exec.getNode("9"), exec.getNode("17"), exec.getNode("20"))); + + // Problem: we get a parallel start with the same flowsegment in the following for more than one parallel start starts = scan.leastCommonAncestor(heads); Assert.assertEquals(2, starts.size()); ForkScanner.ParallelBlockStart inner = starts.getFirst(); From 0cddee1058ed4881aaa126f3ddec5e44e60aa6e0 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 31 May 2016 00:00:54 -0400 Subject: [PATCH 043/104] Fix unused imports 
--- .../jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java | 1 - .../plugins/workflow/graphanalysis/ForkScannerTest.java | 3 --- 2 files changed, 4 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index ab5f94d4..4365ac7f 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -24,7 +24,6 @@ package org.jenkinsci.plugins.workflow.graphanalysis; -import com.sun.tools.javac.comp.Flow; import org.jenkinsci.plugins.workflow.graph.BlockEndNode; import org.jenkinsci.plugins.workflow.graph.BlockStartNode; import org.jenkinsci.plugins.workflow.graph.FlowEndNode; diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java index e3af46d2..de232f58 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java @@ -38,13 +38,10 @@ import org.jvnet.hudson.test.JenkinsRule; import org.junit.Assert; -import java.lang.reflect.Array; import java.util.ArrayDeque; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.HashMap; -import java.util.HashSet; import java.util.LinkedHashSet; import java.util.Set; From 09a5d215464ae60dd08eef12b055831c990f179a Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 31 May 2016 11:19:20 -0400 Subject: [PATCH 044/104] Fix remaining bugs in ForkScanner, add test, and clean up docs/commments there --- .../workflow/graphanalysis/ForkScanner.java | 85 ++++++++++++++----- .../graphanalysis/FlowScannerTest.java | 8 +- 2 files changed, 69 insertions(+), 24 deletions(-) diff --git 
a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index 4365ac7f..ca4cc138 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -47,7 +47,7 @@ * This is a fairly efficient way to visit all FlowNodes, and provides three useful guarantees: * - Every FlowNode is visited, and visited EXACTLY ONCE (not true for LinearScanner) * - All parallel branches are visited before we move past the parallel block (not true for DepthFirstScanner) - * - For EVERY block, the BlockEndNode is visited before the BlockStartNode (not true for DepthFirstScanner) + * - For EVERY block, the BlockEndNode is visited before the BlockStartNode (not true for DepthFirstScanner, with parallels) * * The big advantages of this approach: * - Blocks are visited in the order they end (no backtracking) - helps with working a block at a time @@ -100,7 +100,7 @@ protected ParallelBlockStart(BlockStartNode forkStart, int branchCount) { } interface FlowPiece { // Mostly a marker - /** If true we are a leaf, I.E. no forks follow this */ + /** If true, this is not a fork and has no following forks */ boolean isLeaf(); } @@ -127,7 +127,6 @@ Fork split(@Nonnull HashMap nodeMapping, @Nonnull BlockStar int index = visited.lastIndexOf(joinPoint); // Fork will be closer to end, so this is better than indexOf Fork newFork = new Fork(joinPoint); - // FIXME fork following fork with no segment! --- 30 May fixed, I think? 
if (index < 0) { throw new IllegalStateException("Tried to split a segment where the node doesn't exist in this segment"); } else if (index == this.visited.size()-1) { // We forked just off the most recent node @@ -135,7 +134,7 @@ Fork split(@Nonnull HashMap nodeMapping, @Nonnull BlockStar newFork.following.add(joiningBranch); this.visited.remove(index); } else if (index == 0) { - throw new IllegalStateException("We have a cyclic graph somehow!?"); + throw new IllegalStateException("We have a cyclic graph or heads that are not separate branches!"); } else { // Splitting at some midpoint within the segment, everything before becomes part of the following // Execute the split: create a new fork at the fork point, and shuffle the part of the flow after it // to a new segment and add that to the fork. @@ -191,7 +190,6 @@ ArrayDeque convertForksToBlockStarts(ArrayDeque parall // Add the nodes to the parallel starts here for (FlowPiece fp : f.following) { - // FIXME do something with the remainingCounts to ensure we don't hit issues with primitive branches if (fp.isLeaf()) { // Forks are never leaves start.unvisited.add(((FlowSegment)fp).visited.get(0)); } @@ -202,15 +200,44 @@ ArrayDeque convertForksToBlockStarts(ArrayDeque parall } /** - * Given a set of nodes, walks back (jumping blocks) and constructing the hierarchy of branches - * Back to the least common ancestor (where parallel branches diverge) - * This allows us to use ForkScanner when starting with heads that are parallel branches + * Create the necessary information about parallel blocks in order to provide flowscanning from inside incomplete parallel branches + * This works by walking back to construct the tree of parallel blocks covering all heads back to the Least Common Ancestor of all heads + * (the top parallel block). One by one, as branches join, we remove them from the list of live pieces and replace with their common ancestor. + * + *

The core algorithm is simple in theory but the many cases render the implementation quite complex. In gist: + *

    + *
  • We track FlowPieces, which are Forks (where branches merge) and FlowSegments (where there's a unforked sequence of nodes)
  • + *
  • A map of FlowNode to its containing FlowPiece is created
  • + *
  • For each head we start a new FlowSegment and create an iterator of all enclosing blocks (all we need for this)
  • + *
  • We do a series of passes through all iterators looking to see if the parent of any given piece maps to an existing FlowPiece
  • + *
      + *
    1. Where there are no mappings, we add another node to the FlowSegment
    2. + *
    3. Where an existing piece exists, if it's a Fork, we add the current piece on as a new branch
    4. + *
    5. Where an existing piece exists if it's a FlowSegment, we create a fork: + *
      • If we're joining at the most recent point, create a Fork with both branches following it, and replace that item's ForkSegment in the piece list with a Fork
      • + *
      • If joining midway through, split the segment and create a fork as needed
      + *
    6. + *
    7. When two pieces join together, we remove one from the list
    8. + *
    9. When we're down to a single piece, we have the full ancestry & we're done
    10. + *
    11. When we're down to a single piece, all heads have merged and we're done
    12. + *
    + *
  • Each time we merge a branch in, we need to remove an entry from enclosing blocks & live pieces
  • + *
+ * + *

There are some assumptions you need to know about to understand why this works: + *

    + *
  • None of the pieces have multiple parents, since we only look at enclosing blocks (only BlockEndNodes for a parallel block have multiple parents)
  • + *
  • No cycles exist in the graph
  • + *
  • Flow graphs are correctly constructed
  • + *
  • Heads are all separate branches
  • + *
+ * * @param heads */ ArrayDeque leastCommonAncestor(@Nonnull Set heads) { HashMap branches = new HashMap(); ArrayList> iterators = new ArrayList>(); - ArrayList liveHeads = new ArrayList(); + ArrayList livePieces = new ArrayList(); ArrayDeque parallelForks = new ArrayDeque(); // Tracks the discovered forks in order of encounter @@ -218,14 +245,14 @@ ArrayDeque leastCommonAncestor(@Nonnull Set heads) iterators.add(FlowScanningUtils.filterableEnclosingBlocks(f)); FlowSegment b = new FlowSegment(); b.add(f); - liveHeads.add(b); + livePieces.add(b); branches.put(f, b); } // Walk through, merging flownodes one-by-one until everything has merged to one ancestor while (iterators.size() > 1) { ListIterator> itIterator = iterators.listIterator(); - ListIterator pieceIterator = liveHeads.listIterator(); + ListIterator pieceIterator = livePieces.listIterator(); while (itIterator.hasNext()) { Filterator blockStartIterator = itIterator.next(); @@ -240,9 +267,12 @@ ArrayDeque leastCommonAncestor(@Nonnull Set heads) FlowNode nextBlockStart = blockStartIterator.next(); - // THIS WAS ALL IN THE CHECKFORMERGE function call + // Look for cases where two branches merge together FlowPiece existingPiece = branches.get(nextBlockStart); - if (existingPiece == null && myPiece instanceof Fork) { // Start a segment preceding the fork + if (existingPiece == null && myPiece instanceof FlowSegment) { // No merge, just add to segment + ((FlowSegment) myPiece).add(nextBlockStart); + branches.put(nextBlockStart, myPiece); + } else if (existingPiece == null && myPiece instanceof Fork) { // No merge, we had a fork. 
Start a segment preceding the fork FlowSegment newSegment = new FlowSegment(); newSegment.isLeaf = false; newSegment.add(nextBlockStart); @@ -250,10 +280,7 @@ ArrayDeque leastCommonAncestor(@Nonnull Set heads) pieceIterator.remove(); pieceIterator.add(newSegment); branches.put(nextBlockStart, newSegment); - } else if (existingPiece == null && myPiece instanceof FlowSegment) { // Add to segment - ((FlowSegment) myPiece).add(nextBlockStart); - branches.put(nextBlockStart, myPiece); - } else { // We're merging into another thing, remove this entry, we're done. DONE! + } else if (existingPiece != null) { // Always not null. We're merging into another thing, we're going to elliminate a branch if (existingPiece instanceof Fork) { ((Fork) existingPiece).following.add(myPiece); } else { // Split a flow segment so it forks against this one @@ -261,18 +288,20 @@ ArrayDeque leastCommonAncestor(@Nonnull Set heads) // If we split the existing segment at its end, we created a fork replacing its latest node // Thus we must replace the piece with the fork ahead of it if (f.following.contains(existingPiece) ) { - int headIndex = liveHeads.indexOf(existingPiece); - liveHeads.set(headIndex, f); + int headIndex = livePieces.indexOf(existingPiece); + livePieces.set(headIndex, f); } parallelForks.add(f); } + // Merging removes the piece & its iterator from heads itIterator.remove(); pieceIterator.remove(); } } } + // If we hit issues with the ordering of blocks by depth, apply a sorting to the parallels by depth return convertForksToBlockStarts(parallelForks); } @@ -280,8 +309,13 @@ ArrayDeque leastCommonAncestor(@Nonnull Set heads) protected void setHeads(@Nonnull Collection heads) { if (heads.size() > 1) { //throw new IllegalArgumentException("ForkedFlowScanner can't handle multiple head nodes yet"); - leastCommonAncestor(new LinkedHashSet(heads)); - walkingFromFinish = false;// FIXME do something with the remainingCounts to ensure we don't hit issues with primitive branches + 
parallelBlockStartStack = leastCommonAncestor(new LinkedHashSet(heads)); + currentParallelStart = parallelBlockStartStack.pop(); + currentParallelStartNode = currentParallelStart.forkStart; + myCurrent = currentParallelStart.unvisited.pop(); + myNext = myCurrent; + currentParallelStart.remainingBranches--; + walkingFromFinish = false; } else { FlowNode f = heads.iterator().next(); walkingFromFinish = f instanceof FlowEndNode; @@ -299,6 +333,12 @@ public FlowNode getCurrentParallelStartNode() { return currentParallelStartNode; } + + /** Return number of levels deep we are in parallel blocks */ + public int getParallelDepth() { + return (currentParallelStart == null) ? 0 : 1 + parallelBlockStartStack.size(); + } + /** * Invoked when we start entering a parallel block (walking from head of the flow, so we see the block end first) * @param endNode Node where parents merge (final end node for the parallel block) @@ -374,7 +414,7 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection if (p == currentParallelStartNode) { // Terminating a parallel scan FlowNode temp = hitParallelStart(); - if (temp != null) { // Startnode for current parallel block now that it is done + if (temp != null) { // Start node for current parallel block now that it is done return temp; } } else if (!blackList.contains(p)) { @@ -393,6 +433,7 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection if (currentParallelStart != null && currentParallelStart.unvisited.size() > 0) { output = currentParallelStart.unvisited.pop(); + currentParallelStart.remainingBranches--; } return output; } diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java index e94c341a..4cf588bf 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java @@ 
-374,7 +374,7 @@ public void testParallelScan() throws Exception { assertNodeOrder("ForkedScanner", scanner, 14, 13, 9, 8, 6, 12, 11, 10, 7, 4, 3, 2); // Test forkscanner inside a parallel - /* + List startingPoints = Arrays.asList(exec.getNode("9"), exec.getNode("12")); scanner.setup(startingPoints); assertNodeOrder("ForkedScanner", scanner, 9, 8, 6, 12, 11, 10, 7, 4, 3, 2); @@ -382,7 +382,7 @@ public void testParallelScan() throws Exception { startingPoints = Arrays.asList(exec.getNode("9"), exec.getNode("11")); scanner.setup(startingPoints); assertNodeOrder("ForkedScanner", scanner, 9, 8, 6, 11, 10, 7, 4, 3, 2); - */ + // Filtering at different points within branches List blackList = Arrays.asList(exec.getNode("6"), exec.getNode("7")); @@ -457,5 +457,9 @@ public void testNestedParallelScan() throws Exception { scanner = new ForkScanner(); matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); Assert.assertEquals(7, matches.size()); + + heads = Arrays.asList(exec.getNode("20"), exec.getNode("17"), exec.getNode("9")); + matches = scanner.filteredNodes(heads, null, MATCH_ECHO_STEP); + Assert.assertEquals(6, matches.size()); // Commented out since temporarily failing } } \ No newline at end of file From 5a7774892eb0cb59b4c82df1a68f0024edc30295 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 31 May 2016 11:36:41 -0400 Subject: [PATCH 045/104] Tidy up and HTML format the graph analysis javadocs --- .../graphanalysis/AbstractFlowScanner.java | 51 +++++++++++-------- .../graphanalysis/DepthFirstScanner.java | 4 +- .../graphanalysis/FlowNodeVisitor.java | 3 +- .../workflow/graphanalysis/ForkScanner.java | 26 ++++++---- .../LinearBlockHoppingScanner.java | 25 +++++---- 5 files changed, 62 insertions(+), 47 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java index cc4eb60c..c86604a8 100644 --- 
a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java @@ -43,9 +43,10 @@ /** * Core APIs and base logic for FlowScanners that extract information from a pipeline execution. * - * These iterate through the directed acyclic graph (DAG) or "flow graph" of {@link FlowNode}s produced when a pipeline runs. + *

These iterate through the directed acyclic graph (DAG) or "flow graph" of {@link FlowNode}s produced when a pipeline runs. * - * This provides 6 base APIs to use, in decreasing expressiveness and increasing genericity: + *

This provides 6 base APIs to use, in decreasing expressiveness and increasing genericity: + *

    * - {@link #findFirstMatch(Collection, Collection, Predicate)}: find the first FlowNode matching predicate condition. * - {@link #filteredNodes(Collection, Collection, Predicate)}: return the collection of FlowNodes matching the predicate. * - {@link #visitAll(Collection, FlowNodeVisitor)}: given a {@link FlowNodeVisitor}, invoke {@link FlowNodeVisitor#visit(FlowNode)} on each node and halt when it returns false. @@ -53,31 +54,37 @@ * after you invoke {@link #setup(Collection, Collection)} to initialize it for iteration. * - {@link Filterator}: If initialized as an Iterator, each FlowScanner can provide a filtered view from the current point in time. * - Iterable: for syntactic sugar, FlowScanners implement Iterable to allow use in for-each loops once initialized. + *
* - * All APIs visit the parent nodes, walking backward from heads(inclusive) until they they hit {@link #myBlackList} nodes (exclusive) or reach the end of the DAG. + *

All APIs visit the parent nodes, walking backward from heads(inclusive) until they they hit {@link #myBlackList} nodes (exclusive) or reach the end of the DAG. * If blackList nodes are an empty collection or null, APIs will walk to the beginning of the FlowGraph. * Multiple blackList nodes are helpful for putting separate bounds on walking different parallel branches. * - * Key Points: - * - There are many helper methods offering syntactic sugar for the above APIs in common use cases (simpler method signatures). - * - Each implementation provides its own iteration order (described in its javadoc comments), - * but it is generally unsafe to rely on parallel branches being visited in a specific order. - * - Implementations may visit some or all points in the DAG, this should be called out in the class's javadoc comments - * - FlowScanners are NOT thread safe, for performance reasons and because it is too hard to guarantee. - * - Many fields and methods are protected: this is intentional to allow building upon the implementations for more complex analyses. - * - Each FlowScanner stores state internally for several reasons: - * - This state can be used to construct more advanced analyses. - * - FlowScanners can be reinitialized and reused repeatedly: the overheads of creating scanners repeatedly. + *

Key Points: + *

  • There are many helper methods offering syntactic sugar for the above APIs in common use cases (simpler method signatures).
  • + *
  • Each implementation provides its own iteration order (described in its javadoc comments), + * but it is generally unsafe to rely on parallel branches being visited in a specific order.
  • + *
  • Implementations may visit some or all points in the DAG, this should be called out in the class's javadoc comments
  • + *
  • FlowScanners are NOT thread safe, for performance reasons and because it is too hard to guarantee.
  • + *
  • Many fields and methods are protected: this is intentional to allow building upon the implementations for more complex analyses.
  • + *
  • Each FlowScanner stores state internally for several reasons:
  • + *
      + *
    • This state can be used to construct more advanced analyses.
    • + *
    • FlowScanners can be reinitialized and reused repeatedly: avoids the overheads of creating scanners repeatedly.
    • + *
    • Allows for caching to be added inside a FlowScanner if desired, but caching is only useful when reused.
    • + *
    * - * Suggested uses: - * - Implement a {@link FlowNodeVisitor} that collects metrics from each FlowNode visited, and call visitAll to extract the data. - * - Find all flownodes of a given type (ex: stages), using {@link #filteredNodes(Collection, Collection, Predicate)} - * - Find the first node with an Error before a specific node - * - Scan through all nodes *just* within a block - * - Use the {@link org.jenkinsci.plugins.workflow.graph.BlockEndNode} as the head - * - Use the {@link org.jenkinsci.plugins.workflow.graph.BlockStartNode} as its blacklist with {@link Collections#singleton(Object)} - * - * TODO: come back and prettify this for HTML-style list formatting. + *

    Suggested uses: + *

      + *
    • Implement a {@link FlowNodeVisitor} that collects metrics from each FlowNode visited, and call visitAll to extract the data.
    • + *
    • Find all flownodes of a given type (ex: stages), using {@link #filteredNodes(Collection, Collection, Predicate)}
    • + *
    • Find the first node with an Error before a specific node
    • + *
    • Scan through all nodes *just* within a block + *
        + *
      • Use the {@link org.jenkinsci.plugins.workflow.graph.BlockEndNode} as the head
      • + *
      • Use the {@link org.jenkinsci.plugins.workflow.graph.BlockStartNode} as its blacklist with {@link Collections#singleton(Object)}
      • + *
    • + *
    * * @author Sam Van Oort */ diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java index 3794deed..bfaeb43f 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java @@ -36,10 +36,10 @@ /** Does a simple and somewhat efficient depth-first search of all FlowNodes in the DAG. * - * Iteration order: depth-first search, revisiting parallel branches once done. + *

    Iteration order: depth-first search, revisiting parallel branches once done. * With parallel branches, the first branch is explored, then remaining branches are explored in reverse order. * - * The behavior is analogous to {@link org.jenkinsci.plugins.workflow.graph.FlowGraphWalker} but faster. + *

    The behavior is analogous to {@link org.jenkinsci.plugins.workflow.graph.FlowGraphWalker} but faster. * @author Sam Van Oort */ public class DepthFirstScanner extends AbstractFlowScanner { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java index 553ed36e..b869c9b5 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java @@ -31,7 +31,8 @@ /** * Interface used when examining a pipeline FlowNode graph node by node, and terminating when a condition is met - * This is intended to couple with {@link AbstractFlowScanner#visitAll(Collection, FlowNodeVisitor)} + * + *

    This is intended to couple with {@link AbstractFlowScanner#visitAll(Collection, FlowNodeVisitor)} * @author Sam Van Oort */ public interface FlowNodeVisitor { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index ca4cc138..fe02a23c 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -42,18 +42,22 @@ /** * Scanner that will scan down all forks when we hit parallel blocks before continuing, but generally runs in linear order - * Think of it as the opposite of {@link DepthFirstScanner}. + *

    Think of it as the opposite of {@link DepthFirstScanner}. * - * This is a fairly efficient way to visit all FlowNodes, and provides three useful guarantees: - * - Every FlowNode is visited, and visited EXACTLY ONCE (not true for LinearScanner) - * - All parallel branches are visited before we move past the parallel block (not true for DepthFirstScanner) - * - For EVERY block, the BlockEndNode is visited before the BlockStartNode (not true for DepthFirstScanner, with parallels) + *

    This is a fairly efficient way to visit all FlowNodes, and provides three useful guarantees: + *

      + *
    • Every FlowNode is visited, and visited EXACTLY ONCE (not true for LinearScanner)
    • + *
    • All parallel branches are visited before we move past the parallel block (not true for DepthFirstScanner)
    • + *
    • For EVERY block, the BlockEndNode is visited before the BlockStartNode (not true for DepthFirstScanner, with parallels)
    • + *
    * - * The big advantages of this approach: - * - Blocks are visited in the order they end (no backtracking) - helps with working a block at a time - * - Points are visited in linear order within a block (easy to use for analysis) - * - Minimal state information needed - * - Branch information is available for use here + *

    The big advantages of this approach: + *

      + *
    • Blocks are visited in the order they end (no backtracking) - helps with working a block at a time
    • + *
    • Points are visited in linear order within a block (easy to use for analysis)
    • + *
    • Minimal state information needed
    • + *
    • Branch information is available for use here
    • + *
    * * @author Sam Van Oort */ @@ -83,7 +87,7 @@ public boolean isWalkingFromFinish() { return walkingFromFinish; } - /** Tracks state for parallel blocks, so we can ensure all are visited and know the heads */ + /** Tracks state for parallel blocks, so we can ensure all are visited and know the branch starting point */ protected static class ParallelBlockStart { protected BlockStartNode forkStart; // This is the node with child branches protected int remainingBranches; diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java index bc770ee5..9f385045 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java @@ -34,21 +34,24 @@ import java.util.List; /** - * Extension of {@link LinearScanner} that skips nested blocks at the myCurrent level. - * ONLY use this with nodes inside the flow graph, never the last node of a completed flow (it will jump over the whole flow). + * Extension of {@link LinearScanner} that skips nested blocks at the current level, useful for finding enclosing blocks. + * ONLY use this with nodes inside the flow graph, never the last node of a completed flow (it will jump over the whole flow). * - * This is useful where you only care about {@link FlowNode}s that precede this one or are part of an enclosing scope (within a Block). + *

    This is useful where you only care about {@link FlowNode}s that precede this one or are part of an enclosing scope (within a Block). * - * Specifically: - * - Where a {@link BlockEndNode} is encountered, the scanner will jump to the {@link BlockStartNode} and go to its first parent. - * - The only case where you visit branches of a parallel block is if you begin inside it. + *

    Specifically: + *
      + *
    • Where a {@link BlockEndNode} is encountered, the scanner will jump to the {@link BlockStartNode} and go to its first parent.
    • + *
    • The only case where you visit branches of a parallel block is if you begin inside it.
    • + *
    * - * Specific use cases: - * - Finding out the executor workspace used to run a FlowNode - * - Finding the start of the parallel block enclosing the current node - * - Locating the label applying to a given FlowNode (if any) + *

    Specific use cases: + *

      + *
    • Finding out the executor workspace used to run a FlowNode
    • + *
    • Finding the start of the parallel block enclosing the current node
    • + *
    • Locating the label applying to a given FlowNode (if any)
    • + *
    * - * TODO Format me into a tidy HTML list * @author Sam Van Oort */ public class LinearBlockHoppingScanner extends LinearScanner { From 8bb6e6aacb3f76d6f8b05e13c62e019f40c7ef37 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 31 May 2016 12:32:56 -0400 Subject: [PATCH 046/104] More javadocs + package annotation for graphanalysis --- .../plugins/workflow/graphanalysis/Filterator.java | 2 +- .../plugins/workflow/graphanalysis/ForkScanner.java | 2 +- .../plugins/workflow/graphanalysis/LinearScanner.java | 6 +++--- .../plugins/workflow/graphanalysis/package-info.java | 11 +++++++++++ 4 files changed, 16 insertions(+), 5 deletions(-) create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/package-info.java diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java index 33b678a0..48289da4 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java @@ -31,7 +31,7 @@ /** Iterator that may be navigated through a filtered wrapper. * - * As a rule, assume that returned Filterators wrap an iterator and pass calls to it. + *

    As a rule, assume that returned Filterators wrap an iterator and pass calls to it. * Thus the iterator position will change if next() is called on the filtered versions. * Note also: you may filter a filterator, if needed. * @author Sam Van Oort diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index fe02a23c..05ae4efc 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -330,7 +330,7 @@ protected void setHeads(@Nonnull Collection heads) { /** * Return the node that begins the current parallel head - * @return + * @return The FlowNode that marks current parallel start */ @CheckForNull public FlowNode getCurrentParallelStartNode() { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java index 95a5a549..8f273501 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java @@ -34,12 +34,12 @@ /** * Scans through the flow graph in strictly linear fashion, visiting only the first branch in parallel blocks. * - * Iteration order: depth-ONLY, meaning we walk through parents and only follow the first parent of each {@link FlowNode} + *

    Iteration order: depth-ONLY, meaning we walk through parents and only follow the first parent of each {@link FlowNode} * This means that where are parallel branches, we will only visit a partial set of {@link FlowNode}s in the directed acyclic graph. * - * Use case: we don't care about parallel branches or know they don't exist, we just want to walk through the top-level blocks. + *

    Use case: we don't care about parallel branches or know they don't exist, we just want to walk through the top-level blocks. * - * This is the fastest & simplest way to walk a flow, because you only care about a single node at a time. + *

    This is the fastest & simplest way to walk a flow, because you only care about a single node at a time. * @author Sam Van Oort */ public class LinearScanner extends AbstractFlowScanner { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/package-info.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/package-info.java new file mode 100644 index 00000000..45ef7891 --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/package-info.java @@ -0,0 +1,11 @@ +/** + * Provides a library of methods to work with and analyze the graph of {@link org.jenkinsci.plugins.workflow.graph.FlowNode}s produced from a pipeline execution. + * + *

    The core APIs are described in the javadocs for {@link org.jenkinsci.plugins.workflow.graphanalysis.AbstractFlowScanner} + * But in general it provides for iteration through the Directed Acyclic Graph (DAG) of a flow, filtering, search for matches, and + * visiting all nodes via internal iteration. + * + *

    Static methods and a few implementations are also provided in {@link org.jenkinsci.plugins.workflow.graphanalysis.FlowScanningUtils}. + */ + +package org.jenkinsci.plugins.workflow.graphanalysis; \ No newline at end of file From 10e2eb451d1eac668a4a00a0fe41d219f80d6e29 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 7 Jun 2016 16:49:57 -0400 Subject: [PATCH 047/104] For graph analysis add a FlowChunk object to use in analysis APIs --- .../workflow/graphanalysis/FlowChunk.java | 80 +++++++++++++++++++ 1 file changed, 80 insertions(+) create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java new file mode 100644 index 00000000..c3929d50 --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java @@ -0,0 +1,80 @@ +/* + * The MIT License + * + * Copyright (c) 2016, CloudBees, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +package org.jenkinsci.plugins.workflow.graphanalysis; + +import org.jenkinsci.plugins.workflow.flow.FlowExecution; +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +import javax.annotation.CheckForNull; +import javax.annotation.Nonnull; + +/** + * Represents one or more FlowNodes in a linear sequence + * + *

    Common uses: + *

      + *
    • A single FlowNode
    • + *
    • A block (with a {@link org.jenkinsci.plugins.workflow.graph.BlockStartNode} and {@link org.jenkinsci.plugins.workflow.graph.BlockEndNode})
    • + *
    • A Stage or other arbitrary run of nodes with a beginning and end, determined by a marker
    • + *
    + * @author Sam Van Oort + */ +public interface FlowChunk { + + public enum ChunkType { + NODE, // single node + BLOCK, // block with a BlockStartNode and BlockEndNode + ARBITRARY // Random chunk of data + } + + /** + * Retrieve the starting node + * @param execution Execution for the start and end nodes + * @throws IllegalArgumentException If the start node is not part of the execution given + * @return + */ + @Nonnull + public FlowNode getFirstNode(FlowExecution execution); + + /** + * Retrieve the end node for the block + * @param execution + * @return Null if still in progress + */ + @Nonnull + public FlowNode getLastNode(FlowExecution execution); + + @Nonnull + public String getFirstNodeId(); + + @Nonnull + public String getLastNodeId(); + + /** True if block is finished */ + public boolean isComplete(); + + @Nonnull + public ChunkType getChunkType(); +} From 389543b27b7c571eacc07b5b1e560f6000167334 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 8 Jun 2016 17:02:17 -0400 Subject: [PATCH 048/104] For flow scanning, add an in-memory representation of timed blocks --- .../graphanalysis/MemoryFlowChunk.java | 140 ++++++++++++++++++ .../workflow/graphanalysis/Timeable.java | 11 ++ 2 files changed, 151 insertions(+) create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Timeable.java diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java new file mode 100644 index 00000000..6d96b255 --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java @@ -0,0 +1,140 @@ +/* + * The MIT License + * + * Copyright (c) 2016, CloudBees, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +package org.jenkinsci.plugins.workflow.graphanalysis; + +import org.jenkinsci.plugins.workflow.flow.FlowExecution; +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +import javax.annotation.Nonnull; + +/** + * FlowChunk that holds direct references to the {@link FlowNode} instances + * This makes it easy to use in analysis and visualizations, but inappropriate to retain in caches, etc + * @author Sam Van Oort + */ +public class MemoryFlowChunk implements FlowChunk, Timeable { + + private FlowNode firstNode; + private FlowNode lastNode; + private ChunkType chunkType; + private boolean isComplete = false; + + private long startTimeMillis; + private long endTimeMillis; + private long durationMillis; + private long pauseDurationMillis; + + public MemoryFlowChunk() { + + } + + @Nonnull + @Override + public FlowNode getFirstNode(FlowExecution execution) { + return firstNode; + } + + @Override + public FlowNode getLastNode(FlowExecution execution) { + return lastNode; + } + + @Nonnull + @Override + public String getFirstNodeId() { + return firstNode.getId(); + } + + @Override + public String getLastNodeId() { + return lastNode.getId(); + } + + @Override + public boolean isComplete() { + return false; + } + + @Override + public ChunkType getChunkType() { + return chunkType; + } + + public FlowNode getFirstNode() { + return firstNode; + } + + public void setFirstNode(FlowNode firstNode) { + this.firstNode = firstNode; + } + + public FlowNode getLastNode() { + return lastNode; + } + + public void setLastNode(FlowNode lastNode) { + this.lastNode = lastNode; + } + + public void setIsComplete(boolean isComplete) { + this.isComplete = isComplete; + } + + @Override + public long getStartTimeMillis() { + return startTimeMillis; + } + + public void setStartTimeMillis(long startTimeMillis) { + this.startTimeMillis = startTimeMillis; + } + + @Override + public long getEndTimeMillis() { + return endTimeMillis; + } + + public void setEndTimeMillis(long endTimeMillis) { + 
this.endTimeMillis = endTimeMillis; + } + + @Override + public long getDurationMillis() { + return durationMillis; + } + + public void setDurationMillis(long durationMillis) { + this.durationMillis = durationMillis; + } + + @Override + public long getPauseDurationMillis() { + return pauseDurationMillis; + } + + public void setPauseDurationMillis(long pauseDurationMillis) { + this.pauseDurationMillis = pauseDurationMillis; + } +} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Timeable.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Timeable.java new file mode 100644 index 00000000..d3163bc6 --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Timeable.java @@ -0,0 +1,11 @@ +package org.jenkinsci.plugins.workflow.graphanalysis; + +/** + * Something with distinct start and end times + */ +public interface Timeable { + public long getStartTimeMillis(); + public long getEndTimeMillis(); + public long getDurationMillis(); + public long getPauseDurationMillis(); +} From bd426b3442b9e7e925664f5bfef4c7f8c0bef8f3 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 8 Jun 2016 17:27:34 -0400 Subject: [PATCH 049/104] Add a parallel flow chunk for duration computation --- .../graphanalysis/MemoryFlowChunk.java | 4 + .../ParallelMemoryFlowChunk.java | 76 +++++++++++++++++++ 2 files changed, 80 insertions(+) create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java index 6d96b255..c3c451f5 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java @@ -82,6 +82,10 @@ public ChunkType getChunkType() { return chunkType; } + public void setChunkType(ChunkType type) { + 
this.chunkType = type; + } + public FlowNode getFirstNode() { return firstNode; } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java new file mode 100644 index 00000000..24c900f7 --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java @@ -0,0 +1,76 @@ +/* + * The MIT License + * + * Copyright (c) 2016, CloudBees, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +package org.jenkinsci.plugins.workflow.graphanalysis; + +import javax.annotation.Nonnull; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +/** + * Corresponds to a parallel block, does some customization to compute the timing with parallel branches + * @author Sam Van Oort + */ +public class ParallelMemoryFlowChunk extends MemoryFlowChunk { + private HashMap branches = new HashMap(); + + @Override + public void setPauseDurationMillis(long pauseDurationMillis) { + throw new UnsupportedOperationException("Can't set pause duration for a parallel block, since it is determined by branches"); + } + + @Override + public void setChunkType(ChunkType type) { + throw new UnsupportedOperationException("Parallel chunk types are always block types, cannot override"); + } + + public ChunkType getChunkType() { + return ChunkType.BLOCK; + } + + public void setBranch(@Nonnull String branchName, @Nonnull MemoryFlowChunk branchBlock) { + if (branchBlock.getChunkType() != ChunkType.BLOCK) { + throw new IllegalArgumentException("All parallel branches must be blocks"); + } + branches.put(branchName, branchBlock); + } + + public Map getBranches() { + return Collections.unmodifiableMap(branches); + } + + @Override + public long getPauseDurationMillis() { + if (branches.size() == 0) { + return 0; + } + long longestPause = 0; + for (Map.Entry branch : branches.entrySet()) { + longestPause = Math.max(longestPause, branch.getValue().getPauseDurationMillis()); + } + return longestPause; + } +} From 2efcba47cbfc794961fbd712b72f9cece523dd37 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 8 Jun 2016 21:09:12 -0400 Subject: [PATCH 050/104] Define interface for chunk storage --- .../graphanalysis/FlowChunkStorage.java | 44 +++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkStorage.java diff --git 
a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkStorage.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkStorage.java new file mode 100644 index 00000000..4e9c8987 --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkStorage.java @@ -0,0 +1,44 @@ +package org.jenkinsci.plugins.workflow.graphanalysis; + +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +import javax.annotation.CheckForNull; +import javax.annotation.Nonnull; + +/** + * Storage API used to render/store results from a run analysis + * Example: container classes to return from REST APIs + * Think of it as a factory or state storage for results, but using a fluent API + */ +public interface FlowChunkStorage { + /** Returns the container */ + @Nonnull + public CHUNKBASETYPE createBase(); + + /** Creates a block, given a start node (and possibly end node) */ + @Nonnull + public CHUNKBASETYPE createBlockChunk(@Nonnull FlowNode blockStart, @CheckForNull FlowNode blockEnd); + + // TODO parallel and arbitrary run blocks + + /** Complete analysis of chunk and return it (chunk may be contained in other things but never modified further) */ + @Nonnull + public CHUNKBASETYPE finalizeChunk(@Nonnull CHUNKBASETYPE chunk); + + @Nonnull + public CHUNKBASETYPE configureChunk(@Nonnull FlowNode firstNode, @Nonnull FlowNode lastNode); + + + /** Returns the container */ + @Nonnull + public CHUNKBASETYPE addAtomNode(@Nonnull CHUNKBASETYPE container, @Nonnull FlowNode atomNode); + + /** Returns the container */ + @Nonnull + public CHUNKBASETYPE addBlockInside(@Nonnull CHUNKBASETYPE container, @Nonnull CHUNKBASETYPE content); + + /** Returns the container */ + @Nonnull + public CHUNKBASETYPE setTiming(CHUNKBASETYPE base, long startTimeMillis, long endTimeMillis, long durationMillis, long pauseDurationMillis); + +} From 8ebbdc9aecdb108645927087c989b15a55f156be Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Fri, 10 Jun 2016 00:56:40 
-0400 Subject: [PATCH 051/104] Save simple block visitor --- .../graphanalysis/SimpleBlockVisitor.java | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleBlockVisitor.java diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleBlockVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleBlockVisitor.java new file mode 100644 index 00000000..8b214798 --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleBlockVisitor.java @@ -0,0 +1,39 @@ +/* + * The MIT License + * + * Copyright (c) 2016, CloudBees, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +package org.jenkinsci.plugins.workflow.graphanalysis; + +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +import javax.annotation.CheckForNull; +import javax.annotation.Nonnull; + +/** + * Block handling mechanism + * @author Sam Van Oort + */ +public interface SimpleBlockVisitor { + public void blockStart(@Nonnull FlowNode startNode, @CheckForNull FlowNode beforeBlock, @Nonnull ForkScanner scanner); + public void blockEnd(@Nonnull FlowNode endNode, @CheckForNull FlowNode afterBlock, @Nonnull ForkScanner scanner); + public void atomNode(@Nonnull FlowNode node, @Nonnull ForkScanner scan); +} From f97a08f513ef7a6396f057fc35aff08b97b6ed0f Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Fri, 10 Jun 2016 11:17:33 -0400 Subject: [PATCH 052/104] Basic forkscanner block API --- .../graphanalysis/FlowChunkStorage.java | 1 + .../workflow/graphanalysis/ForkScanner.java | 44 +++++++++++++++++++ 2 files changed, 45 insertions(+) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkStorage.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkStorage.java index 4e9c8987..3554e67b 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkStorage.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkStorage.java @@ -28,6 +28,7 @@ public interface FlowChunkStorage { @Nonnull public CHUNKBASETYPE configureChunk(@Nonnull FlowNode firstNode, @Nonnull FlowNode lastNode); + // TODO add API for parallel /** Returns the container */ @Nonnull diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index 05ae4efc..d23577f8 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -24,6 +24,7 @@ package org.jenkinsci.plugins.workflow.graphanalysis; +import 
org.jenkinsci.plugins.workflow.actions.NotExecutedNodeAction; import org.jenkinsci.plugins.workflow.graph.BlockEndNode; import org.jenkinsci.plugins.workflow.graph.BlockStartNode; import org.jenkinsci.plugins.workflow.graph.FlowEndNode; @@ -441,4 +442,47 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection } return output; } + + public void visitBlocks(@CheckForNull List heads, @Nonnull SimpleBlockVisitor visitor) { + if (!setup(heads)) { + return; + } + + FlowNode previous = null; + FlowNode current = null; + FlowNode next = this.next(); + + if (hasNext()) { + current = next; + next = this.next(); + } else { + return; // No block here + } + + while (this.hasNext()) { + if (current instanceof BlockEndNode) { + visitor.blockEnd(current, next, this); + } else if (current instanceof BlockStartNode) { + visitor.blockStart(current, previous, this); + } else { + visitor.atomNode(current, this); + } + + previous = current; + current = next; + next = next(); + } + + previous = current; + current = next; + next = null; + + if (current instanceof BlockEndNode) { + visitor.blockEnd(current, next, this); + } else if (current instanceof BlockStartNode) { + visitor.blockStart(current, previous, this); + } else { + visitor.atomNode(current, this); + } + } } From 13ef687b6171785ec64d7da055b2d6b261ee7969 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Fri, 10 Jun 2016 12:03:31 -0400 Subject: [PATCH 053/104] Add status handling and memory storage engine to graph analysis --- .../graphanalysis/FlowChunkStorage.java | 7 +- .../graphanalysis/MemoryFlowChunk.java | 3 +- .../graphanalysis/MemoryFlowStorage.java | 76 +++++++++++++++++++ .../graphanalysis/ParallelFlowChunk.java | 17 +++++ .../ParallelMemoryFlowChunk.java | 5 +- 5 files changed, 102 insertions(+), 6 deletions(-) create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowStorage.java create mode 100644 
src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelFlowChunk.java diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkStorage.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkStorage.java index 3554e67b..8ccef13f 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkStorage.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkStorage.java @@ -19,6 +19,9 @@ public interface FlowChunkStorage { @Nonnull public CHUNKBASETYPE createBlockChunk(@Nonnull FlowNode blockStart, @CheckForNull FlowNode blockEnd); + @Nonnull + public CHUNKBASETYPE setStatus(@Nonnull CHUNKBASETYPE chunk, boolean isExecuted, boolean isErrored, boolean isComplete); + // TODO parallel and arbitrary run blocks /** Complete analysis of chunk and return it (chunk may be contained in other things but never modified further) */ @@ -26,9 +29,7 @@ public interface FlowChunkStorage { public CHUNKBASETYPE finalizeChunk(@Nonnull CHUNKBASETYPE chunk); @Nonnull - public CHUNKBASETYPE configureChunk(@Nonnull FlowNode firstNode, @Nonnull FlowNode lastNode); - - // TODO add API for parallel + public CHUNKBASETYPE configureChunk(@Nonnull CHUNKBASETYPE chunk, @Nonnull FlowNode firstNode, @Nonnull FlowNode lastNode); /** Returns the container */ @Nonnull diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java index c3c451f5..37abe756 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java @@ -61,6 +61,7 @@ public FlowNode getLastNode(FlowExecution execution) { return lastNode; } + @Nonnull @Override public String getFirstNodeId() { @@ -74,7 +75,7 @@ public String getLastNodeId() { @Override public boolean isComplete() { - return false; + return isComplete; } 
@Override diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowStorage.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowStorage.java new file mode 100644 index 00000000..dd8c29c4 --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowStorage.java @@ -0,0 +1,76 @@ +package org.jenkinsci.plugins.workflow.graphanalysis; + +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +import javax.annotation.CheckForNull; +import javax.annotation.Nonnull; +import java.util.ArrayDeque; + +/** + * Memory-based flow chunk storage for constructing the tree + */ +public class MemoryFlowStorage implements FlowChunkStorage { + + ArrayDeque scopes = new ArrayDeque(); + + @Nonnull + @Override + public MemoryFlowChunk createBase() { + return new MemoryFlowChunk(); + } + + @Nonnull + @Override + public MemoryFlowChunk createBlockChunk(@Nonnull FlowNode blockStart, @CheckForNull FlowNode blockEnd) { + MemoryFlowChunk output = new MemoryFlowChunk(); + output.setFirstNode(blockStart); + output.setLastNode(blockEnd); + return output; + } + + @Nonnull + @Override + public MemoryFlowChunk setStatus(@Nonnull MemoryFlowChunk chunk, boolean isExecuted, boolean isErrored, boolean isComplete) { + return chunk; // NOOP for now + } + + @Nonnull + @Override + public MemoryFlowChunk finalizeChunk(@Nonnull MemoryFlowChunk chunk) { + return chunk; + } + + + @Nonnull + @Override + public MemoryFlowChunk configureChunk(@Nonnull MemoryFlowChunk chunk, @Nonnull FlowNode firstNode, @Nonnull FlowNode lastNode) { + chunk.setFirstNode(firstNode); + chunk.setLastNode(lastNode); + return chunk; + } + + @Nonnull + @Override + public MemoryFlowChunk addAtomNode(@Nonnull MemoryFlowChunk container, @Nonnull FlowNode atomNode) { + return container; + } + + @Nonnull + @Override + public MemoryFlowChunk addBlockInside(@Nonnull MemoryFlowChunk container, @Nonnull MemoryFlowChunk content) { + if (scopes.peek() == container) { + 
scopes.push(content); + } + return container; + } + + @Nonnull + @Override + public MemoryFlowChunk setTiming(MemoryFlowChunk base, long startTimeMillis, long endTimeMillis, long durationMillis, long pauseDurationMillis) { + base.setStartTimeMillis(startTimeMillis); + base.setEndTimeMillis(endTimeMillis); + base.setDurationMillis(durationMillis); + base.setPauseDurationMillis(pauseDurationMillis); + return base; + } +} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelFlowChunk.java new file mode 100644 index 00000000..a2a80bb0 --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelFlowChunk.java @@ -0,0 +1,17 @@ +package org.jenkinsci.plugins.workflow.graphanalysis; + +import javax.annotation.Nonnull; +import java.util.Map; + +/** + * Flowchunk that has parallel branches + */ +public interface ParallelFlowChunk extends FlowChunk { + + /** Returns the branches of a parallel flow chunk, mapped by branch name and parallel branch block */ + @Nonnull + public Map getBranches(); + + @Nonnull + public void setBranch(@Nonnull String branchName, @Nonnull MemoryFlowChunk branchBlock); +} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java index 24c900f7..ace7a98a 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java @@ -25,7 +25,6 @@ package org.jenkinsci.plugins.workflow.graphanalysis; import javax.annotation.Nonnull; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -34,7 +33,7 @@ * Corresponds to a parallel block, does some customization to compute the timing with parallel branches * @author 
Sam Van Oort */ -public class ParallelMemoryFlowChunk extends MemoryFlowChunk { +public class ParallelMemoryFlowChunk extends MemoryFlowChunk implements ParallelFlowChunk { private HashMap branches = new HashMap(); @Override @@ -58,6 +57,8 @@ public void setBranch(@Nonnull String branchName, @Nonnull MemoryFlowChunk branc branches.put(branchName, branchBlock); } + @Override + @Nonnull public Map getBranches() { return Collections.unmodifiableMap(branches); } From 094cb365a50d4913041ee848cec9f639931cdfba Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Fri, 10 Jun 2016 12:17:36 -0400 Subject: [PATCH 054/104] Carve out advanced visitor methods --- .../graphanalysis/AdvancedVisitor.java | 33 +++++++++++++++++++ .../workflow/graphanalysis/ForkScanner.java | 12 +++++++ 2 files changed, 45 insertions(+) create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AdvancedVisitor.java diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AdvancedVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AdvancedVisitor.java new file mode 100644 index 00000000..77444dd2 --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AdvancedVisitor.java @@ -0,0 +1,33 @@ +package org.jenkinsci.plugins.workflow.graphanalysis; + +import com.google.common.base.Predicate; +import com.google.common.base.Predicates; +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +import javax.annotation.Nonnull; +import java.util.ArrayDeque; + +/** + * Fancier visitor class supporting markers, FlowStorage, etc + */ +public abstract class AdvancedVisitor { + protected Predicate markerTest = (Predicate)Predicates.alwaysFalse(); + protected FlowChunkStorage chunkStorage; + + protected ArrayDeque scopes = new ArrayDeque(); + + public AdvancedVisitor(FlowChunkStorage storageEngine) { + this.chunkStorage = storageEngine; + } + + public Predicate getMarkerTest() { + return markerTest; + } + + public void setMarkerTest(Predicate 
markerTest) { + this.markerTest = markerTest; + } + + /** Visitor core that uses internal info from the scanner */ + public abstract boolean visitSpecial(@Nonnull FlowNode node, @Nonnull ForkScanner scanner); +} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index d23577f8..f38fecda 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -443,6 +443,18 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection return output; } + public void visitAdvanced(@CheckForNull List heads, @Nonnull AdvancedVisitor visitor) { + if (!setup(heads)) { + return; + } + for (FlowNode f : this) { + boolean canContinue = visitor.visitSpecial(f, this); + if (!canContinue) { + break; + } + } + } + public void visitBlocks(@CheckForNull List heads, @Nonnull SimpleBlockVisitor visitor) { if (!setup(heads)) { return; From 2116580183578ea66766a965b37541cdb2983cf1 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Fri, 10 Jun 2016 13:01:52 -0400 Subject: [PATCH 055/104] Advanced visitor for carving up flows (skeletal impl) and parallel handling in storage engines --- .../graphanalysis/AdvancedVisitor.java | 52 ++++++++++++++++++- .../graphanalysis/FlowChunkStorage.java | 8 ++- .../graphanalysis/MemoryFlowStorage.java | 16 ++++++ 3 files changed, 74 insertions(+), 2 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AdvancedVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AdvancedVisitor.java index 77444dd2..ea0d7ff8 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AdvancedVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AdvancedVisitor.java @@ -6,16 +6,47 @@ import javax.annotation.Nonnull; import java.util.ArrayDeque; +import 
java.util.Collection; +import java.util.Collections; +import java.util.IdentityHashMap; /** * Fancier visitor class supporting markers, FlowStorage, etc */ +@edu.umd.cs.findbugs.annotations.SuppressWarnings( + value={"URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD", + "UUF_UNREAD_PUBLIC_OR_PROTECTED_FIELD", + "UUF_UNUSED_PUBLIC_OR_PROTECTED_FIELD"}, + justification="Still implementing") public abstract class AdvancedVisitor { protected Predicate markerTest = (Predicate)Predicates.alwaysFalse(); + + @edu.umd.cs.findbugs.annotations.SuppressWarnings( value="UUF_UNUSED_FIELD", justification="Implementation use in progress") protected FlowChunkStorage chunkStorage; protected ArrayDeque scopes = new ArrayDeque(); + /** Stores ancilliary information on chunks so individual classes don't get cluttered */ + protected IdentityHashMap chunkMetaData = new IdentityHashMap(); + + /** Outputs */ + protected Collection markedChunks = new ArrayDeque(); + + /** Implicit chunk that holds the entire outer scope, may be replaced with a new outer one */ + protected FlowChunk outerScope; + + @edu.umd.cs.findbugs.annotations.SuppressWarnings( value="UUF_UNUSED_FIELD", justification="Implementation use in progress") + protected static class ChunkMetaData { + boolean isComplete; + boolean isExecuted; + boolean isErrored; + } + + /** Override me: if true, we care about a given block type based on its start node type */ + public boolean careAboutBlock(FlowNode blockStartNode) { + return true; + } + public AdvancedVisitor(FlowChunkStorage storageEngine) { this.chunkStorage = storageEngine; } @@ -28,6 +59,25 @@ public void setMarkerTest(Predicate markerTest) { this.markerTest = markerTest; } + public Collection getMarkedChunks() { + return Collections.unmodifiableCollection(markedChunks); + } + + public ArrayDeque getScopes() { + return scopes; + } + /** Visitor core that uses internal info from the scanner */ - public abstract boolean visitSpecial(@Nonnull FlowNode node, @Nonnull ForkScanner scanner); 
+ public boolean visitSpecial(@Nonnull FlowNode node, @Nonnull ForkScanner scanner) { + // TODO Push/pop to arrayDeques of scopes with blocks, filtering on careAboutBlock and not adding if failed + // Marker check, add to markers as needed + // TODO invocations of storage for parallels + // TODO save the timing information using very simple calculations + // TODO recursive add/calc of timing -- add up the tree of block scopes + + + // TODO when we add a new (incomplete) enclosing block, add the pause times of children + + return true; + } } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkStorage.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkStorage.java index 8ccef13f..08cb9695 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkStorage.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkStorage.java @@ -22,7 +22,11 @@ public interface FlowChunkStorage { @Nonnull public CHUNKBASETYPE setStatus(@Nonnull CHUNKBASETYPE chunk, boolean isExecuted, boolean isErrored, boolean isComplete); - // TODO parallel and arbitrary run blocks + @Nonnull + public CHUNKBASETYPE createParallelChunk(); // Return type must implement ParallelFlowChunk + + @Nonnull + public CHUNKBASETYPE addBranchToParallel(@Nonnull CHUNKBASETYPE parallelContainer, String branchName, CHUNKBASETYPE branch); /** Complete analysis of chunk and return it (chunk may be contained in other things but never modified further) */ @Nonnull @@ -39,6 +43,8 @@ public interface FlowChunkStorage { @Nonnull public CHUNKBASETYPE addBlockInside(@Nonnull CHUNKBASETYPE container, @Nonnull CHUNKBASETYPE content); + // TODO adder API for chunks + /** Returns the container */ @Nonnull public CHUNKBASETYPE setTiming(CHUNKBASETYPE base, long startTimeMillis, long endTimeMillis, long durationMillis, long pauseDurationMillis); diff --git 
a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowStorage.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowStorage.java index dd8c29c4..e99bbb0c 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowStorage.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowStorage.java @@ -34,6 +34,22 @@ public MemoryFlowChunk setStatus(@Nonnull MemoryFlowChunk chunk, boolean isExecu return chunk; // NOOP for now } + @Nonnull + @Override + public MemoryFlowChunk createParallelChunk() { + return new ParallelMemoryFlowChunk(); + } + + @Nonnull + @Override + public MemoryFlowChunk addBranchToParallel(@Nonnull MemoryFlowChunk parallelContainer, String branchName, MemoryFlowChunk branch) { + if (! (parallelContainer instanceof ParallelMemoryFlowChunk)) { + throw new IllegalArgumentException("Can't add a parallel branch to a container that is not a ParallelMemoryFlowStorage"); + } + ((ParallelMemoryFlowChunk)parallelContainer).setBranch(branchName, branch); + return parallelContainer; + } + @Nonnull @Override public MemoryFlowChunk finalizeChunk(@Nonnull MemoryFlowChunk chunk) { From 698d0d347cf59ad28eee4868e84f5d48a82b1827 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Sun, 24 Jul 2016 18:58:10 -0400 Subject: [PATCH 056/104] Chunk and block storage types --- .../graphanalysis/AdvancedVisitor.java | 37 +++-- .../workflow/graphanalysis/BlockVisitor.java | 129 ++++++++++++++++++ .../workflow/graphanalysis/FlowChunk.java | 22 ++- .../graphanalysis/FlowChunkStorage.java | 25 +++- .../workflow/graphanalysis/ForkScanner.java | 18 +++ .../graphanalysis/MemoryFlowChunk.java | 2 +- .../graphanalysis/MemoryFlowStorage.java | 2 +- .../graphanalysis/SimpleBlockVisitor.java | 5 + 8 files changed, 218 insertions(+), 22 deletions(-) create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/BlockVisitor.java diff --git 
a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AdvancedVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AdvancedVisitor.java index ea0d7ff8..3a40d631 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AdvancedVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AdvancedVisitor.java @@ -2,6 +2,8 @@ import com.google.common.base.Predicate; import com.google.common.base.Predicates; +import org.jenkinsci.plugins.workflow.graph.BlockEndNode; +import org.jenkinsci.plugins.workflow.graph.BlockStartNode; import org.jenkinsci.plugins.workflow.graph.FlowNode; import javax.annotation.Nonnull; @@ -18,7 +20,10 @@ "UUF_UNREAD_PUBLIC_OR_PROTECTED_FIELD", "UUF_UNUSED_PUBLIC_OR_PROTECTED_FIELD"}, justification="Still implementing") -public abstract class AdvancedVisitor { +public class AdvancedVisitor { + + // FIXME for purposes of tracking nodes *after* a block you might use a HashMap + protected Predicate markerTest = (Predicate)Predicates.alwaysFalse(); @edu.umd.cs.findbugs.annotations.SuppressWarnings( value="UUF_UNUSED_FIELD", justification="Implementation use in progress") @@ -29,7 +34,7 @@ public abstract class AdvancedVisitor { /** Stores ancilliary information on chunks so individual classes don't get cluttered */ protected IdentityHashMap chunkMetaData = new IdentityHashMap(); - /** Outputs */ + /** Outputs -- think stages */ protected Collection markedChunks = new ArrayDeque(); /** Implicit chunk that holds the entire outer scope, may be replaced with a new outer one */ @@ -42,19 +47,19 @@ protected static class ChunkMetaData { boolean isErrored; } - /** Override me: if true, we care about a given block type based on its start node type */ - public boolean careAboutBlock(FlowNode blockStartNode) { - return true; - } - public AdvancedVisitor(FlowChunkStorage storageEngine) { this.chunkStorage = storageEngine; + outerScope = storageEngine.createBase(); } public Predicate getMarkerTest() { 
return markerTest; } + /** Marks chunks of interest, for example stages, if true we start a new chunk of interest + * Every time the predicate evaluates true, a new marked chunk is begun. + * A common one would be a predicate that tells you when you've hit the start of a stage + */ public void setMarkerTest(Predicate markerTest) { this.markerTest = markerTest; } @@ -69,15 +74,27 @@ public ArrayDeque getScopes() { /** Visitor core that uses internal info from the scanner */ public boolean visitSpecial(@Nonnull FlowNode node, @Nonnull ForkScanner scanner) { + + if (getMarkerTest().apply(node)) { + // + } + if (node instanceof BlockEndNode) { + + } else if (node instanceof BlockStartNode) { + // check if end for current start, otherwise create a new block above current + } else { + + } + // TODO Push/pop to arrayDeques of scopes with blocks, filtering on careAboutBlock and not adding if failed // Marker check, add to markers as needed // TODO invocations of storage for parallels - // TODO save the timing information using very simple calculations + // TODO save the timing information using very simple calculations from the pipeline-graph-analysis-plugin // TODO recursive add/calc of timing -- add up the tree of block scopes - - // TODO when we add a new (incomplete) enclosing block, add the pause times of children + // Timing note, if we can track linear pause times and branchwise pause times life will be easier + return true; } } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/BlockVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/BlockVisitor.java new file mode 100644 index 00000000..514d69f6 --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/BlockVisitor.java @@ -0,0 +1,129 @@ +package org.jenkinsci.plugins.workflow.graphanalysis; + +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import org.jenkinsci.plugins.workflow.graph.BlockEndNode; +import 
org.jenkinsci.plugins.workflow.graph.BlockStartNode; +import org.jenkinsci.plugins.workflow.graph.FlowEndNode; +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +import javax.annotation.CheckForNull; +import javax.annotation.Nonnull; +import java.util.ArrayDeque; + +/** + * Visitor that stores a list of block scopes + * This MUST be coupled with the ForkScanner to work correctly because of its iteration order + * This is because it guarantees block-scoped traversal, where every end occurs before a start + * Created by svanoort on 5/12/16. + */ +@SuppressFBWarnings +public class BlockVisitor implements FlowNodeVisitor { + + protected ArrayDeque scopes = new ArrayDeque(); + protected IdFlowBlock currentBlock = new IdFlowBlock(); + + public static interface FlowBlock { + @CheckForNull + public String getBlockStartNodeId(); + + @CheckForNull + public String getBlockEndNodeId(); + + @CheckForNull + public String getFirstChildId(); + + @CheckForNull + public String getLastChildId(); + } + + public class IdFlowBlock implements FlowBlock { + private String blockStartNodeId; + private String blockEndNodeId; + private String firstChildId; + private String lastChildId; + + public String getBlockStartNodeId() { + return blockStartNodeId; + } + + public void setBlockStartNodeId(String blockStartNodeId) { + this.blockStartNodeId = blockStartNodeId; + } + + public String getBlockEndNodeId() { + return blockEndNodeId; + } + + public void setBlockEndNodeId(String blockEndNodeId) { + this.blockEndNodeId = blockEndNodeId; + } + + public String getFirstChildId() { + return firstChildId; + } + + public void setFirstChildId(String firstChildId) { + this.firstChildId = firstChildId; + } + + public String getLastChildId() { + return lastChildId; + } + + public void setLastChildId(String lastChildId) { + this.lastChildId = lastChildId; + } + } + + // Block is closed, we pop it off the scope and do what we want with it + protected void popBlock() { + this.currentBlock = 
this.scopes.pop(); + } + + /** + * Enter a new block scope + * @param block Block that starts then new scope + */ + protected void pushBlock(@Nonnull IdFlowBlock block) { + this.scopes.push(this.currentBlock); + this.currentBlock = block; + } + + protected void addBlockChild(@Nonnull FlowNode f) { + if (currentBlock.getLastChildId() != null) { + currentBlock.setLastChildId(f.getId()); + } + currentBlock.setFirstChildId(f.getId()); + } + + /** + * Visit the flow node, and indicate if we should continue analysis + * + * @param f Node to visit + * @return False if we should stop visiting nodes + */ + public boolean visit(@Nonnull FlowNode f) { + if (f instanceof BlockEndNode) { + IdFlowBlock innerBlock = new IdFlowBlock(); + innerBlock.setBlockEndNodeId(f.getId()); + innerBlock.setBlockStartNodeId(((BlockEndNode) f).getId()); + pushBlock(innerBlock); + } else if (f instanceof BlockStartNode) { + String currentStartId = currentBlock.getBlockStartNodeId(); + if (currentStartId != null && currentBlock.getBlockStartNodeId() != null + && (currentStartId.equals(f.getId())) ) { + // We're done with this block's scope, move up one level + popBlock(); + } else { + // We're inside an unterminated block, add an empty block scope above it to contain it and pop off the current block + IdFlowBlock block = new IdFlowBlock(); + currentBlock.setBlockStartNodeId(f.getId()); + scopes.offer(new IdFlowBlock()); + popBlock(); + } + } else { // We're inside the current block + addBlockChild(f); + } + return true; + } +} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java index c3929d50..d72e94d2 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java @@ -31,22 +31,33 @@ import javax.annotation.Nonnull; /** - * Represents one or more FlowNodes in a linear sequence + * 
Common container interface for a series of {@link FlowNode}s with a logical start and end. + *

    We use this because every plugin has a different way of storing info about the nodes. * *

    Common uses: *

      - *
    • A single FlowNode
    • + *
    • A single FlowNode (when coupling with timing/status APIs)
    • *
    • A block (with a {@link org.jenkinsci.plugins.workflow.graph.BlockStartNode} and {@link org.jenkinsci.plugins.workflow.graph.BlockEndNode})
    • - *
    • A Stage or other arbitrary run of nodes with a beginning and end, determined by a marker
    • + *
    • A linear run of marked nodes (such as a legacy stage)
    • + *
    • A parallel block (special case of block)
    • + *
    • A parallel branch within a parallel block
    • + *
    • A mix of types in sequence, such as nested structures
    • *
    + * * @author Sam Van Oort */ public interface FlowChunk { + /** Since libraries have radically different internal storage, we need a way to distinguish what type a chunk is + * We might replace this with marker interfaces or boolean flags about types. + */ public enum ChunkType { NODE, // single node BLOCK, // block with a BlockStartNode and BlockEndNode - ARBITRARY // Random chunk of data + LINEAR, // stage is this, or something else with a marker + PARALLEL_BRANCH, + PARALLEL_BLOCK, + MIXED // Random chunk of data } /** @@ -61,6 +72,7 @@ public enum ChunkType { /** * Retrieve the end node for the block * @param execution + * @throws IllegalArgumentException If the start node is not part of the execution given * @return Null if still in progress */ @Nonnull @@ -73,7 +85,7 @@ public enum ChunkType { public String getLastNodeId(); /** True if block is finished */ - public boolean isComplete(); + public boolean isBalancedBlock(); @Nonnull public ChunkType getChunkType(); diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkStorage.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkStorage.java index 08cb9695..83cc1556 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkStorage.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkStorage.java @@ -9,9 +9,14 @@ * Storage API used to render/store results from a run analysis * Example: container classes to return from REST APIs * Think of it as a factory or state storage for results, but using a fluent API + * + * This is used to build whatever Directed Acyclic Graph output you use (an API response object) + * This creates container types for your final output, and then callbacks are made to them + * + * Couples tightly to {@link AdvancedVisitor} which issues all the calls to this, to contribute information. 
*/ public interface FlowChunkStorage { - /** Returns the container */ + /** Returns a basic container for arbitrary nodes */ @Nonnull public CHUNKBASETYPE createBase(); @@ -19,33 +24,43 @@ public interface FlowChunkStorage { @Nonnull public CHUNKBASETYPE createBlockChunk(@Nonnull FlowNode blockStart, @CheckForNull FlowNode blockEnd); + /** Convert a series of status flags to a final output result */ @Nonnull public CHUNKBASETYPE setStatus(@Nonnull CHUNKBASETYPE chunk, boolean isExecuted, boolean isErrored, boolean isComplete); + /** Create a new parallel chunk container type */ @Nonnull public CHUNKBASETYPE createParallelChunk(); // Return type must implement ParallelFlowChunk + /** Yup, add in our parallel branch */ @Nonnull public CHUNKBASETYPE addBranchToParallel(@Nonnull CHUNKBASETYPE parallelContainer, String branchName, CHUNKBASETYPE branch); - /** Complete analysis of chunk and return it (chunk may be contained in other things but never modified further) */ + /** Complete analysis of chunk and return it (chunk may be contained in other things but never modified further) + * May be a no-op in many cases + */ @Nonnull public CHUNKBASETYPE finalizeChunk(@Nonnull CHUNKBASETYPE chunk); @Nonnull public CHUNKBASETYPE configureChunk(@Nonnull CHUNKBASETYPE chunk, @Nonnull FlowNode firstNode, @Nonnull FlowNode lastNode); - /** Returns the container */ + /** Using internal representation, *POSSIBLE* store a node to the given chunk. + * The FlowChunkStorage may decide whether or not it cares about the FlowNode, + * and whether or not chunks should store nodes. + * For example, some chunks may store an internal list of nodes. + */ @Nonnull public CHUNKBASETYPE addAtomNode(@Nonnull CHUNKBASETYPE container, @Nonnull FlowNode atomNode); + // Some sort of adder for flownode that shows the context for timing? 
+ /** Returns the container */ @Nonnull public CHUNKBASETYPE addBlockInside(@Nonnull CHUNKBASETYPE container, @Nonnull CHUNKBASETYPE content); - // TODO adder API for chunks - /** Returns the container */ + /** Configure timing for the given chunk */ @Nonnull public CHUNKBASETYPE setTiming(CHUNKBASETYPE base, long startTimeMillis, long endTimeMillis, long durationMillis, long pauseDurationMillis); diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index f38fecda..bc35541f 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -443,6 +443,24 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection return output; } + /** + * Sample call: + * + * ForkScanner scan = new ForkScanner(); + * ApiResponseObject output = new ApiResponseObject(); // Implements FlowChunkStorage and builds up a DAG response + * AdvancedVisitor visit = new AdvancedVisitor(output); + * scan.visitAdvanced(flowExecution.getCurrentHeads(), visit); + * return output; // Configured response object + * //OR + * return output.getGraphObject(); + * + * Alternately: + * return visit.getMarkedChunks(); // List of \ that might be stages if you like + * + * + * @param heads + * @param visitor + */ public void visitAdvanced(@CheckForNull List heads, @Nonnull AdvancedVisitor visitor) { if (!setup(heads)) { return; diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java index 37abe756..dc665160 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java @@ -74,7 +74,7 @@ public String getLastNodeId() { } @Override - public boolean 
isComplete() { + public boolean isBalancedBlock() { return isComplete; } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowStorage.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowStorage.java index e99bbb0c..eaeef216 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowStorage.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowStorage.java @@ -36,7 +36,7 @@ public MemoryFlowChunk setStatus(@Nonnull MemoryFlowChunk chunk, boolean isExecu @Nonnull @Override - public MemoryFlowChunk createParallelChunk() { + public ParallelMemoryFlowChunk createParallelChunk() { return new ParallelMemoryFlowChunk(); } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleBlockVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleBlockVisitor.java index 8b214798..937345cc 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleBlockVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleBlockVisitor.java @@ -33,7 +33,12 @@ * @author Sam Van Oort */ public interface SimpleBlockVisitor { + /** Called when hitting the start of a block */ public void blockStart(@Nonnull FlowNode startNode, @CheckForNull FlowNode beforeBlock, @Nonnull ForkScanner scanner); + + /** Called when hitting the end of a block */ public void blockEnd(@Nonnull FlowNode endNode, @CheckForNull FlowNode afterBlock, @Nonnull ForkScanner scanner); + + /** Called when encountering a node inside a block (may be the implicit outer block though) */ public void atomNode(@Nonnull FlowNode node, @Nonnull ForkScanner scan); } From 64ec619605ed35f8e0995226a2afc64b9b7ad21c Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Sun, 24 Jul 2016 19:07:28 -0400 Subject: [PATCH 057/104] Customize SNAPSHOT version so we can distinguish this from master snapshot --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/pom.xml b/pom.xml index 19be6323..747884ce 100644 --- a/pom.xml +++ b/pom.xml @@ -33,7 +33,7 @@ org.jenkins-ci.plugins.workflow workflow-api - 2.2-SNAPSHOT + 2.2.blockapis-SNAPSHOT hpi Pipeline: API https://wiki.jenkins-ci.org/display/JENKINS/Pipeline+API+Plugin From 9efad20670d2989d519785e4711bdf7e627e7c6d Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Fri, 5 Aug 2016 15:20:34 -0400 Subject: [PATCH 058/104] Remove all the parts not needed for raw block scanning --- .gitignore.orig | 33 ++++++ .../graphanalysis/AdvancedVisitor.java | 100 ------------------ .../workflow/graphanalysis/FlowChunk.java | 41 +------ .../graphanalysis/FlowChunkStorage.java | 67 ------------ .../graphanalysis/FlowChunkWithContext.java | 17 +++ .../workflow/graphanalysis/ForkScanner.java | 43 ++------ .../graphanalysis/MemoryFlowChunk.java | 98 ++++------------- .../graphanalysis/MemoryFlowStorage.java | 92 ---------------- .../graphanalysis/ParallelFlowChunk.java | 8 +- .../ParallelMemoryFlowChunk.java | 33 ++---- .../graphanalysis/SimpleBlockVisitor.java | 24 ++++- .../workflow/graphanalysis/Timeable.java | 11 -- 12 files changed, 108 insertions(+), 459 deletions(-) create mode 100644 .gitignore.orig delete mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AdvancedVisitor.java delete mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkStorage.java create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkWithContext.java delete mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowStorage.java delete mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Timeable.java diff --git a/.gitignore.orig b/.gitignore.orig new file mode 100644 index 00000000..1f64f169 --- /dev/null +++ b/.gitignore.orig @@ -0,0 +1,33 @@ +target +work +<<<<<<< HEAD + +# IntelliJ project files +*.iml +*.iws +*.ipr +.idea +out + +# eclipse project file +.settings +.classpath 
+.project +build + +# vim +*~ +*.swp + +# ctags +tags + +# OS X +.DS_Store + +# mvn versions:set +pom.xml.versionsBackup +======= +.idea +*.iml +>>>>>>> master diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AdvancedVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AdvancedVisitor.java deleted file mode 100644 index 3a40d631..00000000 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AdvancedVisitor.java +++ /dev/null @@ -1,100 +0,0 @@ -package org.jenkinsci.plugins.workflow.graphanalysis; - -import com.google.common.base.Predicate; -import com.google.common.base.Predicates; -import org.jenkinsci.plugins.workflow.graph.BlockEndNode; -import org.jenkinsci.plugins.workflow.graph.BlockStartNode; -import org.jenkinsci.plugins.workflow.graph.FlowNode; - -import javax.annotation.Nonnull; -import java.util.ArrayDeque; -import java.util.Collection; -import java.util.Collections; -import java.util.IdentityHashMap; - -/** - * Fancier visitor class supporting markers, FlowStorage, etc - */ -@edu.umd.cs.findbugs.annotations.SuppressWarnings( - value={"URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD", - "UUF_UNREAD_PUBLIC_OR_PROTECTED_FIELD", - "UUF_UNUSED_PUBLIC_OR_PROTECTED_FIELD"}, - justification="Still implementing") -public class AdvancedVisitor { - - // FIXME for purposes of tracking nodes *after* a block you might use a HashMap - - protected Predicate markerTest = (Predicate)Predicates.alwaysFalse(); - - @edu.umd.cs.findbugs.annotations.SuppressWarnings( value="UUF_UNUSED_FIELD", justification="Implementation use in progress") - protected FlowChunkStorage chunkStorage; - - protected ArrayDeque scopes = new ArrayDeque(); - - /** Stores ancilliary information on chunks so individual classes don't get cluttered */ - protected IdentityHashMap chunkMetaData = new IdentityHashMap(); - - /** Outputs -- think stages */ - protected Collection markedChunks = new ArrayDeque(); - - /** Implicit chunk that holds the entire outer 
scope, may be replaced with a new outer one */ - protected FlowChunk outerScope; - - @edu.umd.cs.findbugs.annotations.SuppressWarnings( value="UUF_UNUSED_FIELD", justification="Implementation use in progress") - protected static class ChunkMetaData { - boolean isComplete; - boolean isExecuted; - boolean isErrored; - } - - public AdvancedVisitor(FlowChunkStorage storageEngine) { - this.chunkStorage = storageEngine; - outerScope = storageEngine.createBase(); - } - - public Predicate getMarkerTest() { - return markerTest; - } - - /** Marks chunks of interest, for example stages, if true we start a new chunk of interest - * Every time the predicate evaluates true, a new marked chunk is begun. - * A common one would be a predicate that tells you when you've hit the start of a stage - */ - public void setMarkerTest(Predicate markerTest) { - this.markerTest = markerTest; - } - - public Collection getMarkedChunks() { - return Collections.unmodifiableCollection(markedChunks); - } - - public ArrayDeque getScopes() { - return scopes; - } - - /** Visitor core that uses internal info from the scanner */ - public boolean visitSpecial(@Nonnull FlowNode node, @Nonnull ForkScanner scanner) { - - if (getMarkerTest().apply(node)) { - // - } - if (node instanceof BlockEndNode) { - - } else if (node instanceof BlockStartNode) { - // check if end for current start, otherwise create a new block above current - } else { - - } - - // TODO Push/pop to arrayDeques of scopes with blocks, filtering on careAboutBlock and not adding if failed - // Marker check, add to markers as needed - // TODO invocations of storage for parallels - // TODO save the timing information using very simple calculations from the pipeline-graph-analysis-plugin - // TODO recursive add/calc of timing -- add up the tree of block scopes - // TODO when we add a new (incomplete) enclosing block, add the pause times of children - - // Timing note, if we can track linear pause times and branchwise pause times life will be 
easier - - return true; - } -} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java index d72e94d2..b0951ead 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java @@ -47,46 +47,9 @@ * @author Sam Van Oort */ public interface FlowChunk { - - /** Since libraries have radically different internal storage, we need a way to distinguish what type a chunk is - * We might replace this with marker interfaces or boolean flags about types. - */ - public enum ChunkType { - NODE, // single node - BLOCK, // block with a BlockStartNode and BlockEndNode - LINEAR, // stage is this, or something else with a marker - PARALLEL_BRANCH, - PARALLEL_BLOCK, - MIXED // Random chunk of data - } - - /** - * Retrieve the starting node - * @param execution Execution for the start and end nodes - * @throws IllegalArgumentException If the start node is not part of the execution given - * @return - */ @Nonnull - public FlowNode getFirstNode(FlowExecution execution); - - /** - * Retrieve the end node for the block - * @param execution - * @throws IllegalArgumentException If the start node is not part of the execution given - * @return Null if still in progress - */ - @Nonnull - public FlowNode getLastNode(FlowExecution execution); - - @Nonnull - public String getFirstNodeId(); - - @Nonnull - public String getLastNodeId(); - - /** True if block is finished */ - public boolean isBalancedBlock(); + public FlowNode getFirstNode(); @Nonnull - public ChunkType getChunkType(); + public FlowNode getLastNode(); } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkStorage.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkStorage.java deleted file mode 100644 index 83cc1556..00000000 --- 
a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkStorage.java +++ /dev/null @@ -1,67 +0,0 @@ -package org.jenkinsci.plugins.workflow.graphanalysis; - -import org.jenkinsci.plugins.workflow.graph.FlowNode; - -import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; - -/** - * Storage API used to render/store results from a run analysis - * Example: container classes to return from REST APIs - * Think of it as a factory or state storage for results, but using a fluent API - * - * This is used to build whatever Directed Acyclic Graph output you use (an API response object) - * This creates container types for your final output, and then callbacks are made to them - * - * Couples tightly to {@link AdvancedVisitor} which issues all the calls to this, to contribute information. - */ -public interface FlowChunkStorage { - /** Returns a basic container for arbitrary nodes */ - @Nonnull - public CHUNKBASETYPE createBase(); - - /** Creates a block, given a start node (and possibly end node) */ - @Nonnull - public CHUNKBASETYPE createBlockChunk(@Nonnull FlowNode blockStart, @CheckForNull FlowNode blockEnd); - - /** Convert a series of status flags to a final output result */ - @Nonnull - public CHUNKBASETYPE setStatus(@Nonnull CHUNKBASETYPE chunk, boolean isExecuted, boolean isErrored, boolean isComplete); - - /** Create a new parallel chunk container type */ - @Nonnull - public CHUNKBASETYPE createParallelChunk(); // Return type must implement ParallelFlowChunk - - /** Yup, add in our parallel branch */ - @Nonnull - public CHUNKBASETYPE addBranchToParallel(@Nonnull CHUNKBASETYPE parallelContainer, String branchName, CHUNKBASETYPE branch); - - /** Complete analysis of chunk and return it (chunk may be contained in other things but never modified further) - * May be a no-op in many cases - */ - @Nonnull - public CHUNKBASETYPE finalizeChunk(@Nonnull CHUNKBASETYPE chunk); - - @Nonnull - public CHUNKBASETYPE configureChunk(@Nonnull 
CHUNKBASETYPE chunk, @Nonnull FlowNode firstNode, @Nonnull FlowNode lastNode); - - /** Using internal representation, *POSSIBLE* store a node to the given chunk. - * The FlowChunkStorage may decide whether or not it cares about the FlowNode, - * and whether or not chunks should store nodes. - * For example, some chunks may store an internal list of nodes. - */ - @Nonnull - public CHUNKBASETYPE addAtomNode(@Nonnull CHUNKBASETYPE container, @Nonnull FlowNode atomNode); - - // Some sort of adder for flownode that shows the context for timing? - - /** Returns the container */ - @Nonnull - public CHUNKBASETYPE addBlockInside(@Nonnull CHUNKBASETYPE container, @Nonnull CHUNKBASETYPE content); - - - /** Configure timing for the given chunk */ - @Nonnull - public CHUNKBASETYPE setTiming(CHUNKBASETYPE base, long startTimeMillis, long endTimeMillis, long durationMillis, long pauseDurationMillis); - -} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkWithContext.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkWithContext.java new file mode 100644 index 00000000..d6527989 --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkWithContext.java @@ -0,0 +1,17 @@ +package org.jenkinsci.plugins.workflow.graphanalysis; + +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +import javax.annotation.CheckForNull; + +/** FlowChunk with information about what comes before/after */ +public interface FlowChunkWithContext extends FlowChunk { + + /** Return the node before this chunk, or null if it is the end */ + @CheckForNull + public FlowNode getNodeBefore(); + + /** Return the node after this chunk, or null if it is the end */ + @CheckForNull + public FlowNode getNodeAfter(); +} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index bc35541f..2608fcf5 100644 --- 
a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -24,7 +24,6 @@ package org.jenkinsci.plugins.workflow.graphanalysis; -import org.jenkinsci.plugins.workflow.actions.NotExecutedNodeAction; import org.jenkinsci.plugins.workflow.graph.BlockEndNode; import org.jenkinsci.plugins.workflow.graph.BlockStartNode; import org.jenkinsci.plugins.workflow.graph.FlowEndNode; @@ -443,36 +442,6 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection return output; } - /** - * Sample call: - * - * ForkScanner scan = new ForkScanner(); - * ApiResponseObject output = new ApiResponseObject(); // Implements FlowChunkStorage and builds up a DAG response - * AdvancedVisitor visit = new AdvancedVisitor(output); - * scan.visitAdvanced(flowExecution.getCurrentHeads(), visit); - * return output; // Configured response object - * //OR - * return output.getGraphObject(); - * - * Alternately: - * return visit.getMarkedChunks(); // List of \ that might be stages if you like - * - * - * @param heads - * @param visitor - */ - public void visitAdvanced(@CheckForNull List heads, @Nonnull AdvancedVisitor visitor) { - if (!setup(heads)) { - return; - } - for (FlowNode f : this) { - boolean canContinue = visitor.visitSpecial(f, this); - if (!canContinue) { - break; - } - } - } - public void visitBlocks(@CheckForNull List heads, @Nonnull SimpleBlockVisitor visitor) { if (!setup(heads)) { return; @@ -491,11 +460,11 @@ public void visitBlocks(@CheckForNull List heads, @Nonnull SimpleBlock while (this.hasNext()) { if (current instanceof BlockEndNode) { - visitor.blockEnd(current, next, this); + visitor.chunkEnd(current, next, this); } else if (current instanceof BlockStartNode) { - visitor.blockStart(current, previous, this); + visitor.chunkStart(current, previous, this); } else { - visitor.atomNode(current, this); + visitor.atomNode(next, current, previous, this); } previous = 
current; @@ -508,11 +477,11 @@ public void visitBlocks(@CheckForNull List heads, @Nonnull SimpleBlock next = null; if (current instanceof BlockEndNode) { - visitor.blockEnd(current, next, this); + visitor.chunkEnd(current, next, this); } else if (current instanceof BlockStartNode) { - visitor.blockStart(current, previous, this); + visitor.chunkStart(current, previous, this); } else { - visitor.atomNode(current, this); + visitor.atomNode(next, current, previous, this); } } } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java index dc665160..eb7df8c4 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java @@ -24,69 +24,31 @@ package org.jenkinsci.plugins.workflow.graphanalysis; -import org.jenkinsci.plugins.workflow.flow.FlowExecution; import org.jenkinsci.plugins.workflow.graph.FlowNode; +import javax.annotation.CheckForNull; import javax.annotation.Nonnull; /** - * FlowChunk that holds direct references to the {@link FlowNode} instances + * FlowChunk that holds direct references to the {@link FlowNode} instances and context info * This makes it easy to use in analysis and visualizations, but inappropriate to retain in caches, etc * @author Sam Van Oort */ -public class MemoryFlowChunk implements FlowChunk, Timeable { - +public class MemoryFlowChunk implements FlowChunkWithContext { private FlowNode firstNode; private FlowNode lastNode; - private ChunkType chunkType; - private boolean isComplete = false; - - private long startTimeMillis; - private long endTimeMillis; - private long durationMillis; - private long pauseDurationMillis; - - public MemoryFlowChunk() { - - } - - @Nonnull - @Override - public FlowNode getFirstNode(FlowExecution execution) { - return firstNode; - } + private FlowNode nodeBefore; + private FlowNode 
nodeAfter; - @Override - public FlowNode getLastNode(FlowExecution execution) { - return lastNode; + public MemoryFlowChunk(@CheckForNull FlowNode before, @Nonnull FlowNode firstNode, @Nonnull FlowNode lastNode, @CheckForNull FlowNode nodeAfter) { + this.setNodeBefore(before); + this.setFirstNode(firstNode); + this.setLastNode(lastNode); + this.setNodeAfter(lastNode); } - @Nonnull @Override - public String getFirstNodeId() { - return firstNode.getId(); - } - - @Override - public String getLastNodeId() { - return lastNode.getId(); - } - - @Override - public boolean isBalancedBlock() { - return isComplete; - } - - @Override - public ChunkType getChunkType() { - return chunkType; - } - - public void setChunkType(ChunkType type) { - this.chunkType = type; - } - public FlowNode getFirstNode() { return firstNode; } @@ -95,6 +57,8 @@ public void setFirstNode(FlowNode firstNode) { this.firstNode = firstNode; } + @Nonnull + @Override public FlowNode getLastNode() { return lastNode; } @@ -103,43 +67,21 @@ public void setLastNode(FlowNode lastNode) { this.lastNode = lastNode; } - public void setIsComplete(boolean isComplete) { - this.isComplete = isComplete; - } - - @Override - public long getStartTimeMillis() { - return startTimeMillis; - } - - public void setStartTimeMillis(long startTimeMillis) { - this.startTimeMillis = startTimeMillis; - } - - @Override - public long getEndTimeMillis() { - return endTimeMillis; - } - - public void setEndTimeMillis(long endTimeMillis) { - this.endTimeMillis = endTimeMillis; - } - @Override - public long getDurationMillis() { - return durationMillis; + public FlowNode getNodeBefore() { + return nodeBefore; } - public void setDurationMillis(long durationMillis) { - this.durationMillis = durationMillis; + public void setNodeBefore(FlowNode nodeBefore) { + this.nodeBefore = nodeBefore; } @Override - public long getPauseDurationMillis() { - return pauseDurationMillis; + public FlowNode getNodeAfter() { + return nodeAfter; } - public void 
setPauseDurationMillis(long pauseDurationMillis) { - this.pauseDurationMillis = pauseDurationMillis; + public void setNodeAfter(FlowNode nodeAfter) { + this.nodeAfter = nodeAfter; } } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowStorage.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowStorage.java deleted file mode 100644 index eaeef216..00000000 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowStorage.java +++ /dev/null @@ -1,92 +0,0 @@ -package org.jenkinsci.plugins.workflow.graphanalysis; - -import org.jenkinsci.plugins.workflow.graph.FlowNode; - -import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; -import java.util.ArrayDeque; - -/** - * Memory-based flow chunk storage for constructing the tree - */ -public class MemoryFlowStorage implements FlowChunkStorage { - - ArrayDeque scopes = new ArrayDeque(); - - @Nonnull - @Override - public MemoryFlowChunk createBase() { - return new MemoryFlowChunk(); - } - - @Nonnull - @Override - public MemoryFlowChunk createBlockChunk(@Nonnull FlowNode blockStart, @CheckForNull FlowNode blockEnd) { - MemoryFlowChunk output = new MemoryFlowChunk(); - output.setFirstNode(blockStart); - output.setLastNode(blockEnd); - return output; - } - - @Nonnull - @Override - public MemoryFlowChunk setStatus(@Nonnull MemoryFlowChunk chunk, boolean isExecuted, boolean isErrored, boolean isComplete) { - return chunk; // NOOP for now - } - - @Nonnull - @Override - public ParallelMemoryFlowChunk createParallelChunk() { - return new ParallelMemoryFlowChunk(); - } - - @Nonnull - @Override - public MemoryFlowChunk addBranchToParallel(@Nonnull MemoryFlowChunk parallelContainer, String branchName, MemoryFlowChunk branch) { - if (! 
(parallelContainer instanceof ParallelMemoryFlowChunk)) { - throw new IllegalArgumentException("Can't add a parallel branch to a container that is not a ParallelMemoryFlowStorage"); - } - ((ParallelMemoryFlowChunk)parallelContainer).setBranch(branchName, branch); - return parallelContainer; - } - - @Nonnull - @Override - public MemoryFlowChunk finalizeChunk(@Nonnull MemoryFlowChunk chunk) { - return chunk; - } - - - @Nonnull - @Override - public MemoryFlowChunk configureChunk(@Nonnull MemoryFlowChunk chunk, @Nonnull FlowNode firstNode, @Nonnull FlowNode lastNode) { - chunk.setFirstNode(firstNode); - chunk.setLastNode(lastNode); - return chunk; - } - - @Nonnull - @Override - public MemoryFlowChunk addAtomNode(@Nonnull MemoryFlowChunk container, @Nonnull FlowNode atomNode) { - return container; - } - - @Nonnull - @Override - public MemoryFlowChunk addBlockInside(@Nonnull MemoryFlowChunk container, @Nonnull MemoryFlowChunk content) { - if (scopes.peek() == container) { - scopes.push(content); - } - return container; - } - - @Nonnull - @Override - public MemoryFlowChunk setTiming(MemoryFlowChunk base, long startTimeMillis, long endTimeMillis, long durationMillis, long pauseDurationMillis) { - base.setStartTimeMillis(startTimeMillis); - base.setEndTimeMillis(endTimeMillis); - base.setDurationMillis(durationMillis); - base.setPauseDurationMillis(pauseDurationMillis); - return base; - } -} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelFlowChunk.java index a2a80bb0..a98abe13 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelFlowChunk.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelFlowChunk.java @@ -4,14 +4,14 @@ import java.util.Map; /** - * Flowchunk that has parallel branches + * FlowChunk that has parallel branches */ -public interface ParallelFlowChunk extends FlowChunk { +public interface 
ParallelFlowChunk extends FlowChunk { /** Returns the branches of a parallel flow chunk, mapped by branch name and parallel branch block */ @Nonnull - public Map getBranches(); + public Map getBranches(); @Nonnull - public void setBranch(@Nonnull String branchName, @Nonnull MemoryFlowChunk branchBlock); + public void setBranch(@Nonnull String branchName, @Nonnull ChunkType branchBlock); } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java index ace7a98a..f7494b88 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java @@ -24,6 +24,9 @@ package org.jenkinsci.plugins.workflow.graphanalysis; +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +import javax.annotation.CheckForNull; import javax.annotation.Nonnull; import java.util.Collections; import java.util.HashMap; @@ -33,27 +36,14 @@ * Corresponds to a parallel block, does some customization to compute the timing with parallel branches * @author Sam Van Oort */ -public class ParallelMemoryFlowChunk extends MemoryFlowChunk implements ParallelFlowChunk { +public class ParallelMemoryFlowChunk extends MemoryFlowChunk implements ParallelFlowChunk { private HashMap branches = new HashMap(); - @Override - public void setPauseDurationMillis(long pauseDurationMillis) { - throw new UnsupportedOperationException("Can't set pause duration for a parallel block, since it is determined by branches"); - } - - @Override - public void setChunkType(ChunkType type) { - throw new UnsupportedOperationException("Parallel chunk types are always block types, cannot override"); - } - - public ChunkType getChunkType() { - return ChunkType.BLOCK; + public ParallelMemoryFlowChunk(@CheckForNull FlowNode nodeBefore, @Nonnull FlowNode firstNode, @Nonnull FlowNode 
lastNode, @CheckForNull FlowNode nodeAfter) { + super (nodeBefore,firstNode, lastNode, nodeAfter); } public void setBranch(@Nonnull String branchName, @Nonnull MemoryFlowChunk branchBlock) { - if (branchBlock.getChunkType() != ChunkType.BLOCK) { - throw new IllegalArgumentException("All parallel branches must be blocks"); - } branches.put(branchName, branchBlock); } @@ -63,15 +53,4 @@ public Map getBranches() { return Collections.unmodifiableMap(branches); } - @Override - public long getPauseDurationMillis() { - if (branches.size() == 0) { - return 0; - } - long longestPause = 0; - for (Map.Entry branch : branches.entrySet()) { - longestPause = Math.max(longestPause, branch.getValue().getPauseDurationMillis()); - } - return longestPause; - } } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleBlockVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleBlockVisitor.java index 937345cc..cdb8e013 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleBlockVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleBlockVisitor.java @@ -23,6 +23,7 @@ */ package org.jenkinsci.plugins.workflow.graphanalysis; +import com.google.common.base.Predicate; import org.jenkinsci.plugins.workflow.graph.FlowNode; import javax.annotation.CheckForNull; @@ -33,12 +34,27 @@ * @author Sam Van Oort */ public interface SimpleBlockVisitor { + + + @Nonnull + public Predicate getChunkStartPredicate(); + + @Nonnull + public Predicate getChunkEndPredicate(); + /** Called when hitting the start of a block */ - public void blockStart(@Nonnull FlowNode startNode, @CheckForNull FlowNode beforeBlock, @Nonnull ForkScanner scanner); + public void chunkStart(@Nonnull FlowNode startNode, @CheckForNull FlowNode beforeBlock, @Nonnull ForkScanner scanner); /** Called when hitting the end of a block */ - public void blockEnd(@Nonnull FlowNode endNode, @CheckForNull FlowNode afterBlock, @Nonnull ForkScanner 
scanner); + public void chunkEnd(@Nonnull FlowNode endNode, @CheckForNull FlowNode afterBlock, @Nonnull ForkScanner scanner); - /** Called when encountering a node inside a block (may be the implicit outer block though) */ - public void atomNode(@Nonnull FlowNode node, @Nonnull ForkScanner scan); + /** + * Called for a flownode within the chunk that is neither start nor end. + * Ways you may want to use this: accumulate pause time, collect errors, etc. + * @param before Node before the current + * @param atomNode The node itself + * @param after Node after the current + * @param scan Reference to our forkscanner, if we want to poke at the state within + */ + public void atomNode(@CheckForNull FlowNode before, @Nonnull FlowNode atomNode, @CheckForNull FlowNode after, @Nonnull ForkScanner scan); } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Timeable.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Timeable.java deleted file mode 100644 index d3163bc6..00000000 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Timeable.java +++ /dev/null @@ -1,11 +0,0 @@ -package org.jenkinsci.plugins.workflow.graphanalysis; - -/** - * Something with distinct start and end times - */ -public interface Timeable { - public long getStartTimeMillis(); - public long getEndTimeMillis(); - public long getDurationMillis(); - public long getPauseDurationMillis(); -} From 1338b6423624e4669414f4d61cd3b6e749df9560 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Fri, 5 Aug 2016 15:54:30 -0400 Subject: [PATCH 059/104] Formalize the block visitor API with parallels and branches yet again --- .../workflow/graphanalysis/SimpleBlockVisitor.java | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleBlockVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleBlockVisitor.java index cdb8e013..6e3acd11 100644 --- 
a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleBlockVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleBlockVisitor.java @@ -30,12 +30,11 @@ import javax.annotation.Nonnull; /** - * Block handling mechanism + * Splits a flow into chunks. What is done with these chunks is 100% up to the consumer. * @author Sam Van Oort */ public interface SimpleBlockVisitor { - @Nonnull public Predicate getChunkStartPredicate(); @@ -48,6 +47,16 @@ public interface SimpleBlockVisitor { /** Called when hitting the end of a block */ public void chunkEnd(@Nonnull FlowNode endNode, @CheckForNull FlowNode afterBlock, @Nonnull ForkScanner scanner); + /** Notifies that we've seen a new parallel block */ + public void parallelStart(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchNode, @Nonnull ForkScanner scanner); + + /** Notifies that we've seen the end of a parallel block*/ + public void parallelEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode parallelEndNode, @Nonnull ForkScanner scanner); + + public void parallelBranchStart(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchStartNode, @Nonnull ForkScanner scanner); + + public void parallelBranchEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchEndNode, @Nonnull ForkScanner scanner); + /** * Called for a flownode within the chunk that is neither start nor end. * Ways you may want to use this: accumulate pause time, collect errors, etc. 
From 6c4b8e30938b5200ea544d14cd4ead3367ae96b2 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Fri, 5 Aug 2016 18:43:59 -0400 Subject: [PATCH 060/104] Harden ChunkVisitor and start a FlowChunker --- .../workflow/graphanalysis/FlowChunker.java | 113 ++++++++++++++++++ .../graphanalysis/FlowScanningUtils.java | 1 + .../workflow/graphanalysis/ForkScanner.java | 43 ------- ...ckVisitor.java => SimpleChunkVisitor.java} | 32 ++++- .../StandardSimpleChunkVisitor.java | 79 ++++++++++++ 5 files changed, 221 insertions(+), 47 deletions(-) create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunker.java rename src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/{SimpleBlockVisitor.java => SimpleChunkVisitor.java} (60%) create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardSimpleChunkVisitor.java diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunker.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunker.java new file mode 100644 index 00000000..90e48f5b --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunker.java @@ -0,0 +1,113 @@ +package org.jenkinsci.plugins.workflow.graphanalysis; + +import com.google.common.base.Predicate; +import org.jenkinsci.plugins.workflow.actions.ThreadNameAction; +import org.jenkinsci.plugins.workflow.flow.FlowExecution; +import org.jenkinsci.plugins.workflow.graph.BlockEndNode; +import org.jenkinsci.plugins.workflow.graph.BlockStartNode; +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +import javax.annotation.Nonnull; +import java.util.ArrayDeque; +import java.util.Enumeration; + +/** + * Splits a flow into chunks. How those chunks are handled is someone else's business... 
+ * @author Sam Van Oort + */ +public class FlowChunker { + + // Adapter to convert from raw ForkScanner iteration to chunks + static class ChunkingIterator { + FlowNode next; + FlowNode previous; + boolean isInsideChunk; + SimpleChunkVisitor visitor; + + // Walk through visiting each node and firing callbacks as needed + boolean next(ForkScanner f) { + FlowNode currentParallelStart = f.getCurrentParallelStartNode(); + + if (f.hasNext()) { + FlowNode newNext = f.next(); // Next becomes current + boolean isTipOfParallelBranch = false; //Start or end node for branch + boolean isAtom = false; + if (visitor.getChunkEndPredicate().apply(next)) { + visitor.chunkEnd(next, previous, f); + } else if (visitor.getChunkStartPredicate().apply(next)) { + visitor.chunkStart(next, newNext, f); + } else { + isAtom = true; + // FIXME what if we're in parallel start or end + + } + if (next instanceof BlockEndNode) { + BlockStartNode start = ((BlockEndNode) next).getStartNode(); + ThreadNameAction thread = start.getAction(ThreadNameAction.class); + if (thread != null) { + visitor.parallelBranchEnd(thread.getThreadName(), start, next, f); + } else if (next.getParentIds().size() > 0) { + visitor.parallelEnd(start, next, f); + } + } else if (next instanceof BlockStartNode) { + ThreadNameAction thread = next.getAction(ThreadNameAction.class); + if (thread != null) { + visitor.parallelBranchStart(thread.getThreadName(), next.getParents().get(0), next, f); + } else { + // TODO use forkscanner state to see if we've hit a parallel start node + } + } else { + // TODO use the state in ForkScanner to detect if we're beinning in an implicit parallel block + } + + if(isAtom) { + if (!isTipOfParallelBranch) { + visitor.atomNode(newNext, next, previous, f); + } else { //We need to use parallel tips info? 
+ // TODO case for start of branch + // TODO case for end of branch + } + } + + previous = next; + next = newNext; + return true; + } else { + finish(); + return false; + } + + } + + void finish() { + // cap things off for final node & do postprocessing + } + + } + + /** Walks through a flow, doing chunking */ + public static void walkme(FlowExecution exec, SimpleChunkVisitor visitor) { + ForkScanner scan = new ForkScanner(); + scan.setup(exec.getCurrentHeads()); + + ChunkingIterator context = new ChunkingIterator(); + context.isInsideChunk = visitor.startInsideChunk(); + context.visitor = visitor; + // SETUP for first nodes? + while (context.next(scan)) { + // Do nothing, it'll run until done + } + } + + /** + * Walks through splitting to chunks based on the condition and exposing them as something we can iterate over (yeah I know) + * @param run + * @param chunkStartCondition + * @param chunkEndCondition + * @return + */ + public static Enumeration splitMe(@Nonnull FlowExecution run, @Nonnull Predicate chunkStartCondition, @Nonnull Predicate chunkEndCondition) { + // TODO create enumerator that builds up an ArrayDeque of chunks & a tree of parallels if needed + return null; + } +} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java index e2ae479d..3ccee1d8 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java @@ -31,6 +31,7 @@ import org.jenkinsci.plugins.workflow.actions.LabelAction; import org.jenkinsci.plugins.workflow.actions.LogAction; import org.jenkinsci.plugins.workflow.actions.StageAction; +import org.jenkinsci.plugins.workflow.actions.ThreadNameAction; import org.jenkinsci.plugins.workflow.actions.WorkspaceAction; import org.jenkinsci.plugins.workflow.graph.BlockEndNode; import 
org.jenkinsci.plugins.workflow.graph.BlockStartNode; diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index 2608fcf5..05ae4efc 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -441,47 +441,4 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection } return output; } - - public void visitBlocks(@CheckForNull List heads, @Nonnull SimpleBlockVisitor visitor) { - if (!setup(heads)) { - return; - } - - FlowNode previous = null; - FlowNode current = null; - FlowNode next = this.next(); - - if (hasNext()) { - current = next; - next = this.next(); - } else { - return; // No block here - } - - while (this.hasNext()) { - if (current instanceof BlockEndNode) { - visitor.chunkEnd(current, next, this); - } else if (current instanceof BlockStartNode) { - visitor.chunkStart(current, previous, this); - } else { - visitor.atomNode(next, current, previous, this); - } - - previous = current; - current = next; - next = next(); - } - - previous = current; - current = next; - next = null; - - if (current instanceof BlockEndNode) { - visitor.chunkEnd(current, next, this); - } else if (current instanceof BlockStartNode) { - visitor.chunkStart(current, previous, this); - } else { - visitor.atomNode(next, current, previous, this); - } - } } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleBlockVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java similarity index 60% rename from src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleBlockVisitor.java rename to src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java index 6e3acd11..20a8ecaa 100644 --- 
a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleBlockVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java @@ -30,10 +30,31 @@ import javax.annotation.Nonnull; /** - * Splits a flow into chunks. What is done with these chunks is 100% up to the consumer. + * This visitor's callbacks are invoked as we walk through a pipeline flow graph, and it splits it into chunks. + * The {@link FlowChunker} uses the split methods & holds state needed convert the {@link ForkScanner}'s API to invoke these right. + * + *

    Determining how we split into chunks.

    + *
      + *
    • {@link #getChunkStartPredicate()} Provides the condition marking the beginning of a chunk we care about
    • + *
    • {@link #getChunkEndPredicate()} Provides the condition to mark a node as ending a chunk we care about
    • + *
    + * + * Think of it as a finite state machine: we're either in a chunk or not. + * + *

    Callbacks Reporting on chunk/parallel information:

    + *
      + *
    • {@link #chunkStart(FlowNode, FlowNode, ForkScanner)} is called when we hit start of a boundary
    • + *
    • {@link #chunkEnd(FlowNode, FlowNode, ForkScanner)} is called when we hit end of a boundary
    • + *
    • {@link #atomNode(FlowNode, FlowNode, FlowNode, ForkScanner)} is called, used to gather information within a chunk
    • + *
    • All the parallel methods are used to report on parallel status - helpful when we need to deal with parallels internal to chunks.
    • + *
    + * + *

    Start/Stop predicates may both trigger on the same node (in which case end is invoked first). + * For example with marker nodes like the legacy stage. + * * @author Sam Van Oort */ -public interface SimpleBlockVisitor { +public interface SimpleChunkVisitor { @Nonnull public Predicate getChunkStartPredicate(); @@ -41,6 +62,9 @@ public interface SimpleBlockVisitor { @Nonnull public Predicate getChunkEndPredicate(); + /** If true, we create an implicit chunk when starting out and don't wait for end condition */ + public boolean startInsideChunk(); + /** Called when hitting the start of a block */ public void chunkStart(@Nonnull FlowNode startNode, @CheckForNull FlowNode beforeBlock, @Nonnull ForkScanner scanner); @@ -53,9 +77,9 @@ public interface SimpleBlockVisitor { /** Notifies that we've seen the end of a parallel block*/ public void parallelEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode parallelEndNode, @Nonnull ForkScanner scanner); - public void parallelBranchStart(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchStartNode, @Nonnull ForkScanner scanner); + public void parallelBranchStart(@Nonnull String branchName, @Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchStartNode, @Nonnull ForkScanner scanner); - public void parallelBranchEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchEndNode, @Nonnull ForkScanner scanner); + public void parallelBranchEnd(@Nonnull String branchName, @Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchEndNode, @Nonnull ForkScanner scanner); /** * Called for a flownode within the chunk that is neither start nor end. 
diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardSimpleChunkVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardSimpleChunkVisitor.java new file mode 100644 index 00000000..b95fb27c --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardSimpleChunkVisitor.java @@ -0,0 +1,79 @@ +package org.jenkinsci.plugins.workflow.graphanalysis; + +import com.google.common.base.Predicate; +import com.google.common.base.Predicates; +import org.jenkinsci.plugins.workflow.graph.BlockEndNode; +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +import javax.annotation.CheckForNull; +import javax.annotation.Nonnull; + +/** + * Created by @author Sam Van Oort + */ +public class StandardSimpleChunkVisitor implements SimpleChunkVisitor { + + private Predicate chunkStartPredicate; + private Predicate chunkEndPredicate; + + @Override + public boolean startInsideChunk() {return false;} + + @Nonnull + @Override + public Predicate getChunkStartPredicate() { + return chunkStartPredicate; + } + + @Nonnull + @Override + public Predicate getChunkEndPredicate() { + return chunkEndPredicate; + } + + public StandardSimpleChunkVisitor(Predicate chunkStartPredicate, Predicate chunkEndPredicate) { + this.chunkStartPredicate = chunkStartPredicate; + this.chunkEndPredicate = chunkEndPredicate; + } + + /** Creates visitor that breaks on blocks starts/ends */ + public StandardSimpleChunkVisitor() { + this.chunkStartPredicate = FlowScanningUtils.MATCH_BLOCK_START; + this.chunkEndPredicate = (Predicate)(Predicates.instanceOf(BlockEndNode.class)); + } + + @Override + public void chunkStart(@Nonnull FlowNode startNode, @CheckForNull FlowNode beforeBlock, @Nonnull ForkScanner scanner) { + + } + + @Override + public void chunkEnd(@Nonnull FlowNode endNode, @CheckForNull FlowNode afterBlock, @Nonnull ForkScanner scanner) { + + } + + @Override + public void parallelStart(@Nonnull FlowNode parallelStartNode, @Nonnull 
FlowNode branchNode, @Nonnull ForkScanner scanner) { + + } + + @Override + public void parallelEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode parallelEndNode, @Nonnull ForkScanner scanner) { + + } + + @Override + public void parallelBranchStart(String branchName, @Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchStartNode, @Nonnull ForkScanner scanner) { + + } + + @Override + public void parallelBranchEnd(String branchName, @Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchEndNode, @Nonnull ForkScanner scanner) { + + } + + @Override + public void atomNode(@CheckForNull FlowNode before, @Nonnull FlowNode atomNode, @CheckForNull FlowNode after, @Nonnull ForkScanner scan) { + + } +} From 0237e9689375a7b721837b2efc2175a6635dd038 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Mon, 8 Aug 2016 11:53:21 -0400 Subject: [PATCH 061/104] Forkscanner now tracks state that shows what kind of nodes we have (and what the next one is) --- .../workflow/graphanalysis/ForkScanner.java | 70 +++++++++++++++++++ .../ParallelMemoryFlowChunk.java | 5 +- .../graphanalysis/SimpleChunkVisitor.java | 22 +++--- .../graphanalysis/ForkScannerTest.java | 21 ++++++ 4 files changed, 106 insertions(+), 12 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index 05ae4efc..c15bc4ea 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -24,6 +24,9 @@ package org.jenkinsci.plugins.workflow.graphanalysis; +import com.google.common.base.Predicate; +import com.google.common.base.Predicates; +import org.jenkinsci.plugins.workflow.actions.ThreadNameAction; import org.jenkinsci.plugins.workflow.graph.BlockEndNode; import org.jenkinsci.plugins.workflow.graph.BlockStartNode; import org.jenkinsci.plugins.workflow.graph.FlowEndNode; 
@@ -63,6 +66,23 @@ */ public class ForkScanner extends AbstractFlowScanner { + public NodeType getCurrentType() { + return currentType; + } + + public NodeType getNextType() { + return nextType; + } + + /** Used to recognize special nodes */ + public enum NodeType { + NORMAL, + PARALLEL_START, + PARALLEL_END, + PARALLEL_BRANCH_START, + PARALLEL_BRANCH_END, + } + // Last element in stack is end of myCurrent parallel start, first is myCurrent start ArrayDeque parallelBlockStartStack = new ArrayDeque(); @@ -73,6 +93,9 @@ public class ForkScanner extends AbstractFlowScanner { private boolean walkingFromFinish = false; + protected NodeType currentType; + protected NodeType nextType; + @Override protected void reset() { parallelBlockStartStack.clear(); @@ -82,6 +105,26 @@ protected void reset() { myNext = null; } + // A bit of a dirty hack, but it works around the fact that we need trivial access to classes from workflow-cps + // For this and only this test. So, we load them from a context that is aware of them. + // Ex: workflow-cps can automatically set this correctly. Not perfectly graceful but it works. 
+ private static Predicate parallelStartPredicate = Predicates.alwaysFalse(); + + // Invoke this passing a test against the ParallelStep conditions + public static void setParallelStartPredicate(@Nonnull Predicate pred) { + parallelStartPredicate = pred; + } + + // Needed because the *next* node might be a parallel start if we start in middle and we don't know it + public static boolean isParallelStart(@CheckForNull FlowNode f) { + return parallelStartPredicate.apply(f); + } + + // Needed because the *next* node might be a parallel end and we don't know it from a normal one + public static boolean isParallelEnd(@CheckForNull FlowNode f) { + return f != null && f instanceof BlockEndNode && isParallelStart(((BlockEndNode) f).getStartNode()); + } + /** If true, we are walking from the flow end node and have a complete view of the flow */ public boolean isWalkingFromFinish() { return walkingFromFinish; @@ -318,6 +361,7 @@ protected void setHeads(@Nonnull Collection heads) { currentParallelStartNode = currentParallelStart.forkStart; myCurrent = currentParallelStart.unvisited.pop(); myNext = myCurrent; + nextType = NodeType.PARALLEL_BRANCH_END; currentParallelStart.remainingBranches--; walkingFromFinish = false; } else { @@ -325,7 +369,15 @@ protected void setHeads(@Nonnull Collection heads) { walkingFromFinish = f instanceof FlowEndNode; myCurrent = f; myNext = f; + if (isParallelEnd(f)) { + nextType = NodeType.PARALLEL_BRANCH_END; + } else if (isParallelStart(f)) { + nextType = NodeType.PARALLEL_START; + } else { + nextType = NodeType.NORMAL; + } } + currentType = null; } /** @@ -405,6 +457,13 @@ protected FlowNode hitParallelStart() { return (output != null && !myBlackList.contains(output)) ? 
output : null; } + @Override + public FlowNode next() { + currentType = nextType; + FlowNode output = super.next(); + return output; + } + @Override protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection blackList) { FlowNode output = null; @@ -419,9 +478,15 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection // Terminating a parallel scan FlowNode temp = hitParallelStart(); if (temp != null) { // Start node for current parallel block now that it is done + nextType = NodeType.PARALLEL_START; return temp; } } else if (!blackList.contains(p)) { + if (p instanceof BlockStartNode && p.getAction(ThreadNameAction.class) != null) { + nextType = NodeType.PARALLEL_BRANCH_START; + } else { + nextType = NodeType.NORMAL; + } return p; } } else if (current instanceof BlockEndNode && parents.size() > 1) { @@ -429,6 +494,7 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection BlockEndNode end = ((BlockEndNode) current); FlowNode possibleOutput = hitParallelEnd(end, parents, blackList); // What if output is block but other branches aren't? 
if (possibleOutput != null) { + nextType = NodeType.PARALLEL_BRANCH_END; return possibleOutput; } } else { @@ -437,8 +503,12 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection if (currentParallelStart != null && currentParallelStart.unvisited.size() > 0) { output = currentParallelStart.unvisited.pop(); + nextType = NodeType.PARALLEL_BRANCH_END; currentParallelStart.remainingBranches--; } + if (output == null) { + nextType = null; + } return output; } } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java index f7494b88..363ba735 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java @@ -30,6 +30,7 @@ import javax.annotation.Nonnull; import java.util.Collections; import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.Map; /** @@ -37,7 +38,9 @@ * @author Sam Van Oort */ public class ParallelMemoryFlowChunk extends MemoryFlowChunk implements ParallelFlowChunk { - private HashMap branches = new HashMap(); + + // LinkedHashMap to preserve insert order + private LinkedHashMap branches = new LinkedHashMap(); public ParallelMemoryFlowChunk(@CheckForNull FlowNode nodeBefore, @Nonnull FlowNode firstNode, @Nonnull FlowNode lastNode, @CheckForNull FlowNode nodeAfter) { super (nodeBefore,firstNode, lastNode, nodeAfter); diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java index 20a8ecaa..ae81c03c 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java @@ -54,32 +54,32 @@ * * @author Sam Van Oort */ -public 
interface SimpleChunkVisitor { +interface SimpleChunkVisitor { @Nonnull - public Predicate getChunkStartPredicate(); + Predicate getChunkStartPredicate(); @Nonnull - public Predicate getChunkEndPredicate(); + Predicate getChunkEndPredicate(); /** If true, we create an implicit chunk when starting out and don't wait for end condition */ - public boolean startInsideChunk(); + boolean startInsideChunk(); /** Called when hitting the start of a block */ - public void chunkStart(@Nonnull FlowNode startNode, @CheckForNull FlowNode beforeBlock, @Nonnull ForkScanner scanner); + void chunkStart(@Nonnull FlowNode startNode, @CheckForNull FlowNode beforeBlock, @Nonnull ForkScanner scanner); /** Called when hitting the end of a block */ - public void chunkEnd(@Nonnull FlowNode endNode, @CheckForNull FlowNode afterBlock, @Nonnull ForkScanner scanner); + void chunkEnd(@Nonnull FlowNode endNode, @CheckForNull FlowNode afterBlock, @Nonnull ForkScanner scanner); /** Notifies that we've seen a new parallel block */ - public void parallelStart(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchNode, @Nonnull ForkScanner scanner); + void parallelStart(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchNode, @Nonnull ForkScanner scanner); /** Notifies that we've seen the end of a parallel block*/ - public void parallelEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode parallelEndNode, @Nonnull ForkScanner scanner); + void parallelEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode parallelEndNode, @Nonnull ForkScanner scanner); - public void parallelBranchStart(@Nonnull String branchName, @Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchStartNode, @Nonnull ForkScanner scanner); + void parallelBranchStart(@Nonnull String branchName, @Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchStartNode, @Nonnull ForkScanner scanner); - public void parallelBranchEnd(@Nonnull String branchName, @Nonnull FlowNode parallelStartNode, @Nonnull 
FlowNode branchEndNode, @Nonnull ForkScanner scanner); + void parallelBranchEnd(@Nonnull String branchName, @Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchEndNode, @Nonnull ForkScanner scanner); /** * Called for a flownode within the chunk that is neither start nor end. @@ -89,5 +89,5 @@ public interface SimpleChunkVisitor { * @param after Node after the current * @param scan Reference to our forkscanner, if we want to poke at the state within */ - public void atomNode(@CheckForNull FlowNode before, @Nonnull FlowNode atomNode, @CheckForNull FlowNode after, @Nonnull ForkScanner scan); + void atomNode(@CheckForNull FlowNode before, @Nonnull FlowNode atomNode, @CheckForNull FlowNode after, @Nonnull ForkScanner scan); } diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java index de232f58..ed36b21b 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java @@ -24,7 +24,10 @@ package org.jenkinsci.plugins.workflow.graphanalysis; +import com.google.common.base.Predicate; import org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition; +import org.jenkinsci.plugins.workflow.cps.nodes.StepStartNode; +import org.jenkinsci.plugins.workflow.cps.steps.ParallelStep; import org.jenkinsci.plugins.workflow.flow.FlowExecution; import org.jenkinsci.plugins.workflow.graph.BlockStartNode; import org.jenkinsci.plugins.workflow.graph.FlowNode; @@ -149,6 +152,13 @@ public void setUp() throws Exception { this.NESTED_PARALLEL_RUN = b; } + public static Predicate PARALLEL_START_PREDICATE = new Predicate() { + @Override + public boolean apply(FlowNode input) { + return input != null && input instanceof StepStartNode && (((StepStartNode) input).getDescriptor().getClass() == ParallelStep.DescriptorImpl.class); + } + }; + @Test public void 
testForkedScanner() throws Exception { FlowExecution exec = SIMPLE_PARALLEL_RUN.getExecution(); @@ -157,6 +167,7 @@ public void testForkedScanner() throws Exception { // Initial case ForkScanner scanner = new ForkScanner(); scanner.setup(heads, null); + ForkScanner.setParallelStartPredicate(PARALLEL_START_PREDICATE); Assert.assertNull(scanner.currentParallelStart); Assert.assertNull(scanner.currentParallelStartNode); Assert.assertNotNull(scanner.parallelBlockStartStack); @@ -166,6 +177,8 @@ public void testForkedScanner() throws Exception { // Fork case scanner.setup(exec.getNode("13")); Assert.assertFalse(scanner.isWalkingFromFinish()); + Assert.assertEquals(null, scanner.currentType); + Assert.assertEquals(ForkScanner.NodeType.PARALLEL_BRANCH_END, scanner.nextType); Assert.assertEquals("13", scanner.next().getId()); Assert.assertNotNull(scanner.parallelBlockStartStack); Assert.assertEquals(0, scanner.parallelBlockStartStack.size()); @@ -178,9 +191,17 @@ public void testForkedScanner() throws Exception { Assert.assertEquals(exec.getNode("4"), start.forkStart); Assert.assertEquals(exec.getNode("9"), scanner.next()); + Assert.assertEquals(ForkScanner.NodeType.PARALLEL_BRANCH_END, scanner.getCurrentType()); + Assert.assertEquals(ForkScanner.NodeType.NORMAL, scanner.getNextType()); Assert.assertEquals(exec.getNode("8"), scanner.next()); + Assert.assertEquals(ForkScanner.NodeType.NORMAL, scanner.getCurrentType()); + Assert.assertEquals(ForkScanner.NodeType.PARALLEL_BRANCH_START, scanner.getNextType()); Assert.assertEquals(exec.getNode("6"), scanner.next()); + Assert.assertEquals(ForkScanner.NodeType.PARALLEL_BRANCH_START, scanner.getCurrentType()); + Assert.assertEquals(ForkScanner.NodeType.PARALLEL_BRANCH_END, scanner.getNextType()); FlowNode f = scanner.next(); + Assert.assertEquals(ForkScanner.NodeType.PARALLEL_BRANCH_END, scanner.getCurrentType()); + Assert.assertEquals(ForkScanner.NodeType.NORMAL, scanner.getNextType()); Assert.assertEquals(exec.getNode("12"), f); 
// Now we test the least common ancestor bits From abdf296768b129db11f328175bdcedfa8a1ef850 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 9 Aug 2016 21:33:26 -0400 Subject: [PATCH 062/104] When splitting chunks you need to know previous node for inclusive vs. exclusive endpoints --- .../graphanalysis/BlockChunkFinder.java | 30 +++++++++++++ .../workflow/graphanalysis/ChunkFinder.java | 34 +++++++++++++++ .../workflow/graphanalysis/ForkScanner.java | 4 +- .../workflow/graphanalysis/LabelFinder.java | 42 +++++++++++++++++++ .../graphanalysis/ParallelFlowChunk.java | 2 +- .../graphanalysis/SimpleChunkVisitor.java | 42 +++++++++---------- 6 files changed, 129 insertions(+), 25 deletions(-) create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/BlockChunkFinder.java create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ChunkFinder.java create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelFinder.java diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/BlockChunkFinder.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/BlockChunkFinder.java new file mode 100644 index 00000000..d739b59c --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/BlockChunkFinder.java @@ -0,0 +1,30 @@ +package org.jenkinsci.plugins.workflow.graphanalysis; + +import org.jenkinsci.plugins.workflow.graph.BlockEndNode; +import org.jenkinsci.plugins.workflow.graph.BlockStartNode; +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +import javax.annotation.CheckForNull; +import javax.annotation.Nonnull; + +/** + * Matches start and end of a block. 
Any block + * Created by @author Sam Van Oort + */ +public class BlockChunkFinder implements ChunkFinder { + + @Override + public boolean isStartInsideChunk() { + return false; + } + + @Override + public boolean isChunkStart(@Nonnull FlowNode current, @CheckForNull FlowNode previous) { + return current instanceof BlockStartNode; + } + + @Override + public boolean isChunkEnd(@Nonnull FlowNode current, @CheckForNull FlowNode previous) { + return current instanceof BlockEndNode; + } +} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ChunkFinder.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ChunkFinder.java new file mode 100644 index 00000000..894e5cd1 --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ChunkFinder.java @@ -0,0 +1,34 @@ +package org.jenkinsci.plugins.workflow.graphanalysis; + +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +import javax.annotation.CheckForNull; +import javax.annotation.Nonnull; + +/** + * Matches the start and end of a chunk + * Created by @author Sam Van Oort + */ +public interface ChunkFinder { + + /** If true, a chunk is implicitly created whenever we begin */ + boolean isStartInsideChunk(); + + /** + * Test if the current node is the start of a new chunk (inclusive) + * @param current Node to test for being a start, it will begin the chunk and be included + * @param previous Previous node, to use in testing chunk + * @return True if current node is the beginning of chunk + */ + boolean isChunkStart(@Nonnull FlowNode current, @CheckForNull FlowNode previous); + + /** + * Test if the current node is the end of a chunk (inclusive) + * @param current Node to test for being end + *

    For a block, the {@link org.jenkinsci.plugins.workflow.graph.BlockEndNode} + *

    For a legacy stage or marker, this will be first node of new stage (previous is the marker) + * @param previous Previous node, to use in testing chunk + * @return True if current is the end of a chunk (inclusive) + */ + boolean isChunkEnd(@Nonnull FlowNode current, @CheckForNull FlowNode previous); +} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index c15bc4ea..019efa9b 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -370,7 +370,7 @@ protected void setHeads(@Nonnull Collection heads) { myCurrent = f; myNext = f; if (isParallelEnd(f)) { - nextType = NodeType.PARALLEL_BRANCH_END; + nextType = NodeType.PARALLEL_END; } else if (isParallelStart(f)) { nextType = NodeType.PARALLEL_START; } else { @@ -484,6 +484,8 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection } else if (!blackList.contains(p)) { if (p instanceof BlockStartNode && p.getAction(ThreadNameAction.class) != null) { nextType = NodeType.PARALLEL_BRANCH_START; + } else if (ForkScanner.isParallelEnd(p)) { + nextType = NodeType.PARALLEL_END; } else { nextType = NodeType.NORMAL; } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelFinder.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelFinder.java new file mode 100644 index 00000000..d9a0b484 --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelFinder.java @@ -0,0 +1,42 @@ +package org.jenkinsci.plugins.workflow.graphanalysis; + +import org.jenkinsci.plugins.workflow.actions.LabelAction; +import org.jenkinsci.plugins.workflow.graph.BlockEndNode; +import org.jenkinsci.plugins.workflow.graph.BlockStartNode; +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +import javax.annotation.CheckForNull; +import 
javax.annotation.Nonnull; + +/** + * Basically finds stages. *Technically* it's any block of nodes. + * Creates chunks whenever you have a labelled linear block (not a parallel branch). + * Created by @author Sam Van Oort + */ +public class LabelFinder implements ChunkFinder { + + public boolean isStartInsideChunk() { + return true; + } + + @Override + public boolean isChunkStart(@Nonnull FlowNode current, @CheckForNull FlowNode previous) { + LabelAction la = current.getAction(LabelAction.class); + return la != null; + } + + /** End is where you have a label marker before it... or */ + @Override + public boolean isChunkEnd(@Nonnull FlowNode current, @CheckForNull FlowNode previous) { + if (previous == null) { + return false; + } + if (current instanceof BlockEndNode) { + BlockStartNode bsn = ((BlockEndNode) previous).getStartNode(); + if (isChunkStart(bsn, null)) { + return true; + } + } + return previous != null && isChunkStart(previous, null); + } +} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelFlowChunk.java index a98abe13..ae08052b 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelFlowChunk.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelFlowChunk.java @@ -4,7 +4,7 @@ import java.util.Map; /** - * FlowChunk that has parallel branches + * FlowChunk mapping to the block from a Parallel step (with parallel branches inside) */ public interface ParallelFlowChunk extends FlowChunk { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java index ae81c03c..cfb5936b 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java @@ -23,7 +23,6 @@ */ 
package org.jenkinsci.plugins.workflow.graphanalysis; -import com.google.common.base.Predicate; import org.jenkinsci.plugins.workflow.graph.FlowNode; import javax.annotation.CheckForNull; @@ -31,44 +30,41 @@ /** * This visitor's callbacks are invoked as we walk through a pipeline flow graph, and it splits it into chunks. - * The {@link FlowChunker} uses the split methods & holds state needed convert the {@link ForkScanner}'s API to invoke these right. + *

    A {@link FlowChunker} creates these FlowChunks using a {@link ChunkFinder} to define the chunk boundaries. * - *

    Determining how we split into chunks.

    + *

    Implementations get to decide how to use & handle chunks. + *

    At a minimum they should handle:

    *
      - *
    • {@link #getChunkStartPredicate()} Provides the condition marking the beginning of a chunk we care about
    • - *
    • {@link #getChunkEndPredicate()} Provides the condition to mark a node as ending a chunk we care about
    • + *
    • Unbalanced numbers of chunk start/end calls
    • + *
    • A chunk end with no beginning (runs to start of flow, or never began)
    • + *
    • A chunk start with no end (ex: a block that hasn't completed running)
    • + *
    • Other starts/ends before we hit the closing one
    • *
    * - * Think of it as a finite state machine: we're either in a chunk or not. + * Important implementation note: multiple callbacks can be invoked for a single node depending on its type.For example, we may capture parallels as chunks. * *

    Callbacks Reporting on chunk/parallel information:

    *
      - *
    • {@link #chunkStart(FlowNode, FlowNode, ForkScanner)} is called when we hit start of a boundary
    • - *
    • {@link #chunkEnd(FlowNode, FlowNode, ForkScanner)} is called when we hit end of a boundary
    • - *
    • {@link #atomNode(FlowNode, FlowNode, FlowNode, ForkScanner)} is called, used to gather information within a chunk
    • + *
    • {@link #chunkStart(FlowNode, FlowNode, ForkScanner)} is called on the current node when we hit start of a boundary (inclusive)
    • + *
    • {@link #chunkEnd(FlowNode, FlowNode, ForkScanner)} is called when we hit end of a boundary (inclusive)
    • + *
    • {@link #atomNode(FlowNode, FlowNode, FlowNode, ForkScanner)} called when a node is neither start nor end.
    • *
    • All the parallel methods are used to report on parallel status - helpful when we need to deal with parallels internal to chunks.
    • *
    * - *

    Start/Stop predicates may both trigger on the same node (in which case end is invoked first). - * For example with marker nodes like the legacy stage. - * * @author Sam Van Oort */ interface SimpleChunkVisitor { - @Nonnull - Predicate getChunkStartPredicate(); - - @Nonnull - Predicate getChunkEndPredicate(); - - /** If true, we create an implicit chunk when starting out and don't wait for end condition */ - boolean startInsideChunk(); - - /** Called when hitting the start of a block */ + /** + * Called when hitting the start of a chunk + * @param startNode First node in chunk (marker), included in node + * @param beforeBlock First node before chunk + * @param scanner Forkscanner used (for state tracking) + */ void chunkStart(@Nonnull FlowNode startNode, @CheckForNull FlowNode beforeBlock, @Nonnull ForkScanner scanner); - /** Called when hitting the end of a block */ + /** Called when hitting the end of a block (determined by the chunkEndPredicate) */ void chunkEnd(@Nonnull FlowNode endNode, @CheckForNull FlowNode afterBlock, @Nonnull ForkScanner scanner); /** Notifies that we've seen a new parallel block */ From a9554516abe93954d8c4b8a6e5493e8d01ef3bb2 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 10 Aug 2016 14:07:08 -0400 Subject: [PATCH 063/104] Add some peeking methods to ForkScanner and also a few small access/findbugs fixes --- .../plugins/workflow/graphanalysis/ForkScanner.java | 12 ++++++++++++ .../plugins/workflow/graphanalysis/LabelFinder.java | 2 +- .../workflow/graphanalysis/MemoryFlowChunk.java | 8 ++++---- 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index 019efa9b..1d9e2fb3 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -457,6 +457,18 @@ 
protected FlowNode hitParallelStart() { return (output != null && !myBlackList.contains(output)) ? output : null; } + /** Return the current node without iterating */ + @CheckForNull + public FlowNode peekCurrent() { + return this.myCurrent; + } + + /** Return the next node without iterating */ + @CheckForNull + public FlowNode peekNext() { + return this.myNext; + } + @Override public FlowNode next() { currentType = nextType; diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelFinder.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelFinder.java index d9a0b484..e1ae2a1c 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelFinder.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelFinder.java @@ -37,6 +37,6 @@ public boolean isChunkEnd(@Nonnull FlowNode current, @CheckForNull FlowNode prev return true; } } - return previous != null && isChunkStart(previous, null); + return isChunkStart(previous, null); } } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java index eb7df8c4..ac5dee6e 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java @@ -35,10 +35,10 @@ * @author Sam Van Oort */ public class MemoryFlowChunk implements FlowChunkWithContext { - private FlowNode firstNode; - private FlowNode lastNode; - private FlowNode nodeBefore; - private FlowNode nodeAfter; + protected FlowNode firstNode; + protected FlowNode lastNode; + protected FlowNode nodeBefore; + protected FlowNode nodeAfter; public MemoryFlowChunk(@CheckForNull FlowNode before, @Nonnull FlowNode firstNode, @Nonnull FlowNode lastNode, @CheckForNull FlowNode nodeAfter) { this.setNodeBefore(before); From 6eb370506702f6a9b70bc589d7fb22cfceafba23 Mon Sep 17 00:00:00 2001 From: 
Sam Van Oort Date: Wed, 10 Aug 2016 18:39:43 -0400 Subject: [PATCH 064/104] Finish the internal SimpleChunkVisitor iteration logic in ForkScanner, refactor --- .../workflow/graphanalysis/FlowChunker.java | 113 ------------------ .../workflow/graphanalysis/ForkScanner.java | 41 +++++++ .../ParallelMemoryFlowChunk.java | 4 + .../graphanalysis/SimpleChunkVisitor.java | 6 +- .../StandardSimpleChunkVisitor.java | 63 +++++----- 5 files changed, 76 insertions(+), 151 deletions(-) delete mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunker.java diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunker.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunker.java deleted file mode 100644 index 90e48f5b..00000000 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunker.java +++ /dev/null @@ -1,113 +0,0 @@ -package org.jenkinsci.plugins.workflow.graphanalysis; - -import com.google.common.base.Predicate; -import org.jenkinsci.plugins.workflow.actions.ThreadNameAction; -import org.jenkinsci.plugins.workflow.flow.FlowExecution; -import org.jenkinsci.plugins.workflow.graph.BlockEndNode; -import org.jenkinsci.plugins.workflow.graph.BlockStartNode; -import org.jenkinsci.plugins.workflow.graph.FlowNode; - -import javax.annotation.Nonnull; -import java.util.ArrayDeque; -import java.util.Enumeration; - -/** - * Splits a flow into chunks. How those chunks are handled is someone else's business... 
- * @author Sam Van Oort - */ -public class FlowChunker { - - // Adapter to convert from raw ForkScanner iteration to chunks - static class ChunkingIterator { - FlowNode next; - FlowNode previous; - boolean isInsideChunk; - SimpleChunkVisitor visitor; - - // Walk through visiting each node and firing callbacks as needed - boolean next(ForkScanner f) { - FlowNode currentParallelStart = f.getCurrentParallelStartNode(); - - if (f.hasNext()) { - FlowNode newNext = f.next(); // Next becomes current - boolean isTipOfParallelBranch = false; //Start or end node for branch - boolean isAtom = false; - if (visitor.getChunkEndPredicate().apply(next)) { - visitor.chunkEnd(next, previous, f); - } else if (visitor.getChunkStartPredicate().apply(next)) { - visitor.chunkStart(next, newNext, f); - } else { - isAtom = true; - // FIXME what if we're in parallel start or end - - } - if (next instanceof BlockEndNode) { - BlockStartNode start = ((BlockEndNode) next).getStartNode(); - ThreadNameAction thread = start.getAction(ThreadNameAction.class); - if (thread != null) { - visitor.parallelBranchEnd(thread.getThreadName(), start, next, f); - } else if (next.getParentIds().size() > 0) { - visitor.parallelEnd(start, next, f); - } - } else if (next instanceof BlockStartNode) { - ThreadNameAction thread = next.getAction(ThreadNameAction.class); - if (thread != null) { - visitor.parallelBranchStart(thread.getThreadName(), next.getParents().get(0), next, f); - } else { - // TODO use forkscanner state to see if we've hit a parallel start node - } - } else { - // TODO use the state in ForkScanner to detect if we're beinning in an implicit parallel block - } - - if(isAtom) { - if (!isTipOfParallelBranch) { - visitor.atomNode(newNext, next, previous, f); - } else { //We need to use parallel tips info? 
- // TODO case for start of branch - // TODO case for end of branch - } - } - - previous = next; - next = newNext; - return true; - } else { - finish(); - return false; - } - - } - - void finish() { - // cap things off for final node & do postprocessing - } - - } - - /** Walks through a flow, doing chunking */ - public static void walkme(FlowExecution exec, SimpleChunkVisitor visitor) { - ForkScanner scan = new ForkScanner(); - scan.setup(exec.getCurrentHeads()); - - ChunkingIterator context = new ChunkingIterator(); - context.isInsideChunk = visitor.startInsideChunk(); - context.visitor = visitor; - // SETUP for first nodes? - while (context.next(scan)) { - // Do nothing, it'll run until done - } - } - - /** - * Walks through splitting to chunks based on the condition and exposing them as something we can iterate over (yeah I know) - * @param run - * @param chunkStartCondition - * @param chunkEndCondition - * @return - */ - public static Enumeration splitMe(@Nonnull FlowExecution run, @Nonnull Predicate chunkStartCondition, @Nonnull Predicate chunkEndCondition) { - // TODO create enumerator that builds up an ArrayDeque of chunks & a tree of parallels if needed - return null; - } -} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index 1d9e2fb3..3a67d3e7 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -525,4 +525,45 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection } return output; } + + /** Walk through flows */ + public void visitSimpleChunks(SimpleChunkVisitor visitor, ChunkFinder finder) { + FlowNode prev = null; + while(hasNext()) { + prev = myCurrent; + FlowNode f = next(); + + boolean boundary = false; + if (finder.isChunkStart(myCurrent, prev)) { + visitor.chunkStart(myCurrent, myNext, this); 
+ boundary = true; + } + if (finder.isChunkEnd(myCurrent, prev)) { + visitor.chunkEnd(myCurrent, prev, this); + boundary = true; + } + if (!boundary) { + visitor.atomNode(prev, f, myNext, this); + } + + // Trigger on parallels + switch (currentType) { + case PARALLEL_END: + visitor.parallelEnd(this.currentParallelStartNode, prev, this); + break; + case PARALLEL_START: + visitor.parallelStart(myCurrent, prev, this); + break; + case PARALLEL_BRANCH_END: + visitor.parallelBranchEnd(myCurrent, this.currentParallelStartNode, this); + break; + case PARALLEL_BRANCH_START: + visitor.parallelBranchStart(myCurrent, this.currentParallelStartNode, this); + break; + default: + break; + } + } + } + } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java index 363ba735..8029e923 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java @@ -42,6 +42,10 @@ public class ParallelMemoryFlowChunk extends MemoryFlowChunk implements Parallel // LinkedHashMap to preserve insert order private LinkedHashMap branches = new LinkedHashMap(); + public ParallelMemoryFlowChunk(@Nonnull FlowNode firstNode, @Nonnull FlowNode lastNode) { + super (null,firstNode, lastNode, null); + } + public ParallelMemoryFlowChunk(@CheckForNull FlowNode nodeBefore, @Nonnull FlowNode firstNode, @Nonnull FlowNode lastNode, @CheckForNull FlowNode nodeAfter) { super (nodeBefore,firstNode, lastNode, nodeAfter); } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java index cfb5936b..da8e904c 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java +++ 
b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java @@ -30,7 +30,7 @@ /** * This visitor's callbacks are invoked as we walk through a pipeline flow graph, and it splits it into chunks. - *

    A {@link FlowChunker} creates these FlowChunks using a {@link ChunkFinder} to define the chunk boundaries. + *

    A {@link ForkScanner#visitSimpleChunks(SimpleChunkVisitor, ChunkFinder)} creates these FlowChunks using a {@link ChunkFinder} to define the chunk boundaries. * *

    Implementations get to decide how to use & handle chunks. *

    At a minimum they should handle:

    @@ -73,9 +73,9 @@ interface SimpleChunkVisitor { /** Notifies that we've seen the end of a parallel block*/ void parallelEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode parallelEndNode, @Nonnull ForkScanner scanner); - void parallelBranchStart(@Nonnull String branchName, @Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchStartNode, @Nonnull ForkScanner scanner); + void parallelBranchStart(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchStartNode, @Nonnull ForkScanner scanner); - void parallelBranchEnd(@Nonnull String branchName, @Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchEndNode, @Nonnull ForkScanner scanner); + void parallelBranchEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchEndNode, @Nonnull ForkScanner scanner); /** * Called for a flownode within the chunk that is neither start nor end. diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardSimpleChunkVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardSimpleChunkVisitor.java index b95fb27c..a8c90ec3 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardSimpleChunkVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardSimpleChunkVisitor.java @@ -1,46 +1,29 @@ package org.jenkinsci.plugins.workflow.graphanalysis; -import com.google.common.base.Predicate; -import com.google.common.base.Predicates; -import org.jenkinsci.plugins.workflow.graph.BlockEndNode; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import org.jenkinsci.plugins.workflow.graph.FlowNode; import javax.annotation.CheckForNull; import javax.annotation.Nonnull; +import java.util.ArrayDeque; +import java.util.Iterator; +import java.util.ListIterator; /** + * Fairly straightforward implementation that will cover many cases. + * To use it, extend it and invoke the parent methods while adding internal logic. 
* Created by @author Sam Van Oort */ +@SuppressFBWarnings public class StandardSimpleChunkVisitor implements SimpleChunkVisitor { - private Predicate chunkStartPredicate; - private Predicate chunkEndPredicate; + // FIXME: nice-to-have: track current parallel state so we can do pause timing for parallel branches. - @Override - public boolean startInsideChunk() {return false;} - - @Nonnull - @Override - public Predicate getChunkStartPredicate() { - return chunkStartPredicate; - } - - @Nonnull - @Override - public Predicate getChunkEndPredicate() { - return chunkEndPredicate; - } + protected ArrayDeque currentChunks = new ArrayDeque(); + protected MemoryFlowChunk currentChunk; - public StandardSimpleChunkVisitor(Predicate chunkStartPredicate, Predicate chunkEndPredicate) { - this.chunkStartPredicate = chunkStartPredicate; - this.chunkEndPredicate = chunkEndPredicate; - } - - /** Creates visitor that breaks on blocks starts/ends */ - public StandardSimpleChunkVisitor() { - this.chunkStartPredicate = FlowScanningUtils.MATCH_BLOCK_START; - this.chunkEndPredicate = (Predicate)(Predicates.instanceOf(BlockEndNode.class)); - } + // Tracks parallel state, last item in currentChunks + protected ArrayDeque parallels = new ArrayDeque(); @Override public void chunkStart(@Nonnull FlowNode startNode, @CheckForNull FlowNode beforeBlock, @Nonnull ForkScanner scanner) { @@ -54,22 +37,32 @@ public void chunkEnd(@Nonnull FlowNode endNode, @CheckForNull FlowNode afterBloc @Override public void parallelStart(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchNode, @Nonnull ForkScanner scanner) { - + /*if (parallels.size() > 0) { + Iterator it = parallels.iterator(); + while (it.hasNext()) { + ParallelMemoryFlowChunk p = it.next(); + if (p.getFirstNode() == parallelStartNode) { + it.remove(); + break; + } + } + }*/ } @Override public void parallelEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode parallelEndNode, @Nonnull ForkScanner scanner) { - + 
/*ParallelMemoryFlowChunk chunk = new ParallelMemoryFlowChunk(parallelStartNode, parallelEndNode); + parallels.push(chunk);*/ } @Override - public void parallelBranchStart(String branchName, @Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchStartNode, @Nonnull ForkScanner scanner) { - + public void parallelBranchStart(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchStartNode, @Nonnull ForkScanner scanner) { + // TODO handle me } @Override - public void parallelBranchEnd(String branchName, @Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchEndNode, @Nonnull ForkScanner scanner) { - + public void parallelBranchEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchEndNode, @Nonnull ForkScanner scanner) { + // TOOD do stuff with me } @Override From ac2609d02a247478c7ac11ffff057df62e5ba7c3 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 10 Aug 2016 18:54:05 -0400 Subject: [PATCH 065/104] Remove the useless peek methods --- .../plugins/workflow/graphanalysis/ForkScanner.java | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index 3a67d3e7..e1efd8e7 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -457,18 +457,6 @@ protected FlowNode hitParallelStart() { return (output != null && !myBlackList.contains(output)) ? 
output : null; } - /** Return the current node without iterating */ - @CheckForNull - public FlowNode peekCurrent() { - return this.myCurrent; - } - - /** Return the next node without iterating */ - @CheckForNull - public FlowNode peekNext() { - return this.myNext; - } - @Override public FlowNode next() { currentType = nextType; From e8e6203dea5404248aa1af34dde257b41e24f2c7 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 10 Aug 2016 19:35:38 -0400 Subject: [PATCH 066/104] Fix test failure --- .../jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java | 2 +- .../plugins/workflow/graphanalysis/ForkScannerTest.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index e1efd8e7..b0c6120c 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -122,7 +122,7 @@ public static boolean isParallelStart(@CheckForNull FlowNode f) { // Needed because the *next* node might be a parallel end and we don't know it from a normal one public static boolean isParallelEnd(@CheckForNull FlowNode f) { - return f != null && f instanceof BlockEndNode && isParallelStart(((BlockEndNode) f).getStartNode()); + return f != null && f instanceof BlockEndNode && (f.getParents().size()>1 || isParallelStart(((BlockEndNode) f).getStartNode())); } /** If true, we are walking from the flow end node and have a complete view of the flow */ diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java index ed36b21b..c32a1fac 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java +++ 
b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java @@ -178,7 +178,7 @@ public void testForkedScanner() throws Exception { scanner.setup(exec.getNode("13")); Assert.assertFalse(scanner.isWalkingFromFinish()); Assert.assertEquals(null, scanner.currentType); - Assert.assertEquals(ForkScanner.NodeType.PARALLEL_BRANCH_END, scanner.nextType); + Assert.assertEquals(ForkScanner.NodeType.PARALLEL_END, scanner.nextType); Assert.assertEquals("13", scanner.next().getId()); Assert.assertNotNull(scanner.parallelBlockStartStack); Assert.assertEquals(0, scanner.parallelBlockStartStack.size()); From b5c2b9d06dd3b64005d14ccf7af7aba3c65e23f3 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 10 Aug 2016 19:57:44 -0400 Subject: [PATCH 067/104] More small fixes --- .../jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java | 3 +++ .../plugins/workflow/graphanalysis/SimpleChunkVisitor.java | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index b0c6120c..b4d1218d 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -517,6 +517,9 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection /** Walk through flows */ public void visitSimpleChunks(SimpleChunkVisitor visitor, ChunkFinder finder) { FlowNode prev = null; + if (finder.isStartInsideChunk() && hasNext()) { + visitor.chunkEnd(this.myNext, null, this); + } while(hasNext()) { prev = myCurrent; FlowNode f = next(); diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java index da8e904c..c10c941f 100644 --- 
a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java @@ -54,7 +54,7 @@ * * @author Sam Van Oort */ -interface SimpleChunkVisitor { +public interface SimpleChunkVisitor { /** * Called when hitting the start of a chunk From 866e21deb93897a43512aed173a5f46fda171d7b Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 10 Aug 2016 22:31:48 -0400 Subject: [PATCH 068/104] Test visitor and tests for visitor iteration in ForkScanner --- .../workflow/graphanalysis/ForkScanner.java | 6 +- .../graphanalysis/SimpleChunkVisitor.java | 24 +++- .../graphanalysis/ForkScannerTest.java | 49 ++++++++- .../workflow/graphanalysis/TestVisitor.java | 104 ++++++++++++++++++ 4 files changed, 175 insertions(+), 8 deletions(-) create mode 100644 src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestVisitor.java diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index b4d1218d..907828a2 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -521,7 +521,7 @@ public void visitSimpleChunks(SimpleChunkVisitor visitor, ChunkFinder finder) { visitor.chunkEnd(this.myNext, null, this); } while(hasNext()) { - prev = myCurrent; + prev = (myCurrent != myNext) ? 
myCurrent : null; FlowNode f = next(); boolean boundary = false; @@ -546,10 +546,10 @@ public void visitSimpleChunks(SimpleChunkVisitor visitor, ChunkFinder finder) { visitor.parallelStart(myCurrent, prev, this); break; case PARALLEL_BRANCH_END: - visitor.parallelBranchEnd(myCurrent, this.currentParallelStartNode, this); + visitor.parallelBranchEnd(this.currentParallelStartNode, myCurrent, this); break; case PARALLEL_BRANCH_START: - visitor.parallelBranchStart(myCurrent, this.currentParallelStartNode, this); + visitor.parallelBranchStart(this.currentParallelStartNode, myCurrent, this); break; default: break; diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java index c10c941f..8419b6aa 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java @@ -64,15 +64,31 @@ public interface SimpleChunkVisitor { */ void chunkStart(@Nonnull FlowNode startNode, @CheckForNull FlowNode beforeBlock, @Nonnull ForkScanner scanner); - /** Called when hitting the end of a block (determined by the chunkEndPredicate) */ - void chunkEnd(@Nonnull FlowNode endNode, @CheckForNull FlowNode afterBlock, @Nonnull ForkScanner scanner); + /** Called when hitting the end of a block */ + void chunkEnd(@Nonnull FlowNode endNode, @CheckForNull FlowNode afterChunk, @Nonnull ForkScanner scanner); - /** Notifies that we've seen a new parallel block */ + /** + * Notifies that we've hit the start of a parallel block (the point where it branches out) + * @param parallelStartNode The {@link org.jenkinsci.plugins.workflow.graph.BlockStartNode} beginning it, next will be branches + * @param branchNode {@link org.jenkinsci.plugins.workflow.graph.BlockStartNode} for one of the branches (it will be labelled) + * @param scanner ForkScanner used + */ void 
parallelStart(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchNode, @Nonnull ForkScanner scanner); - /** Notifies that we've seen the end of a parallel block*/ + /** + * Notifies that we've seen the end of a parallel block + * @param parallelStartNode First node of parallel + * @param parallelEndNode Last node of parallel + * @param scanner + */ void parallelEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode parallelEndNode, @Nonnull ForkScanner scanner); + /** + * + * @param parallelStartNode + * @param branchStartNode + * @param scanner + */ void parallelBranchStart(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchStartNode, @Nonnull ForkScanner scanner); void parallelBranchEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchEndNode, @Nonnull ForkScanner scanner); diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java index c32a1fac..4bd1ff03 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java @@ -25,6 +25,9 @@ package org.jenkinsci.plugins.workflow.graphanalysis; import com.google.common.base.Predicate; +import com.google.common.base.Predicates; +import com.google.common.collect.Iterables; +import com.google.common.collect.Lists; import org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition; import org.jenkinsci.plugins.workflow.cps.nodes.StepStartNode; import org.jenkinsci.plugins.workflow.cps.steps.ParallelStep; @@ -42,10 +45,12 @@ import org.junit.Assert; import java.util.ArrayDeque; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.LinkedHashSet; +import java.util.List; import java.util.Set; // Slightly dirty but it removes a ton of FlowTestUtils.* class qualifiers @@ -306,7 +311,6 @@ public void 
testFlowSegmentSplit() throws Exception { public void testLeastCommonAncestor() throws Exception { FlowExecution exec = SIMPLE_PARALLEL_RUN.getExecution(); - ForkScanner scan = new ForkScanner(); // Starts at the ends of the parallel branches Set heads = new LinkedHashSet(Arrays.asList(exec.getNode("12"), exec.getNode("9"))); @@ -341,4 +345,47 @@ public void testLeastCommonAncestor() throws Exception { Assert.assertEquals(exec.getNode("9"), outer.unvisited.peek()); Assert.assertEquals(exec.getNode("4"), outer.forkStart); } + + /** For nodes, see {@link #SIMPLE_PARALLEL_RUN} */ + @Test + public void testSimpleVisitor() throws Exception { + ForkScanner.setParallelStartPredicate(PARALLEL_START_PREDICATE); + FlowExecution exec = this.SIMPLE_PARALLEL_RUN.getExecution(); + ForkScanner f = new ForkScanner(); + f.setup(exec.getCurrentHeads()); + TestVisitor visitor = new TestVisitor(); + + f.visitSimpleChunks(visitor, new BlockChunkFinder()); + + // 13 calls for chunk/atoms, 6 for parallels + Assert.assertEquals(19, visitor.calls.size()); + + // End has nothing after it, just last node (15) + TestVisitor.CallEntry last = new TestVisitor.CallEntry(TestVisitor.CallType.CHUNK_END, 15, -1, -1, -1); + last.assertEquals(visitor.calls.get(0)); + + // Start has nothing before it, just the first node (2) + TestVisitor.CallEntry first = new TestVisitor.CallEntry(TestVisitor.CallType.CHUNK_START, 2, -1, -1, -1); + first.assertEquals(visitor.calls.get(18)); + + List parallelCalls = Lists.newArrayList(Iterables.filter(visitor.calls, new Predicate() { + @Override + public boolean apply(TestVisitor.CallEntry input) { + return input.type != null + && input.type != TestVisitor.CallType.ATOM_NODE + && input.type != TestVisitor.CallType.CHUNK_START + && input.type != TestVisitor.CallType.CHUNK_END; + } + })); + Assert.assertEquals(6, parallelCalls.size()); + // Start to end + new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_END, 4, 13).assertEquals(visitor.calls.get(0)); + + // Start 
to end, in reverse order + new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_BRANCH_END, 4, 12).assertEquals(visitor.calls.get(1)); + new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_BRANCH_START, 4, 7).assertEquals(visitor.calls.get(2)); + new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_BRANCH_END, 4, 9).assertEquals(visitor.calls.get(3)); + new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_BRANCH_START, 4, 6).assertEquals(visitor.calls.get(4)); + new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_START, 4, 6).assertEquals(visitor.calls.get(5)); + } } diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestVisitor.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestVisitor.java new file mode 100644 index 00000000..634b5a63 --- /dev/null +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestVisitor.java @@ -0,0 +1,104 @@ +package org.jenkinsci.plugins.workflow.graphanalysis; + +import org.jenkinsci.plugins.workflow.graph.FlowNode; +import org.junit.Assert; + +import javax.annotation.CheckForNull; +import javax.annotation.Nonnull; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/** + * Test visitor class, tracks invocations of methods + */ +public class TestVisitor implements SimpleChunkVisitor { + public enum CallType { + ATOM_NODE, + CHUNK_START, + CHUNK_END, + PARALLEL_START, + PARALLEL_END, + PARALLEL_BRANCH_START, + PARALLEL_BRANCH_END + } + + public static class CallEntry { + CallType type; + int[] ids = {-1, -1, -1, -1}; + + public void setIds(FlowNode... 
nodes) { + for (int i=0; i calls = new ArrayList(); + + @Override + public void chunkStart(@Nonnull FlowNode startNode, @CheckForNull FlowNode beforeBlock, @Nonnull ForkScanner scanner) { + calls.add(new CallEntry(CallType.CHUNK_START, startNode, beforeBlock)); + } + + @Override + public void chunkEnd(@Nonnull FlowNode endNode, @CheckForNull FlowNode afterChunk, @Nonnull ForkScanner scanner) { + calls.add(new CallEntry(CallType.CHUNK_END, endNode, afterChunk)); + } + + @Override + public void parallelStart(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchNode, @Nonnull ForkScanner scanner) { + calls.add(new CallEntry(CallType.PARALLEL_START, parallelStartNode, branchNode)); + } + + @Override + public void parallelEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode parallelEndNode, @Nonnull ForkScanner scanner) { + calls.add(new CallEntry(CallType.PARALLEL_END, parallelStartNode, parallelEndNode)); + } + + @Override + public void parallelBranchStart(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchStartNode, @Nonnull ForkScanner scanner) { + calls.add(new CallEntry(CallType.PARALLEL_BRANCH_START, parallelStartNode, branchStartNode)); + } + + @Override + public void parallelBranchEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchEndNode, @Nonnull ForkScanner scanner) { + calls.add(new CallEntry(CallType.PARALLEL_BRANCH_END, parallelStartNode, branchEndNode)); + } + + @Override + public void atomNode(@CheckForNull FlowNode before, @Nonnull FlowNode atomNode, @CheckForNull FlowNode after, @Nonnull ForkScanner scan) { + calls.add(new CallEntry(CallType.ATOM_NODE, before, atomNode, after)); + } +} From cd6e3dd28b841e72462c41dc226ae287ab58ea4e Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Thu, 11 Aug 2016 11:15:19 -0400 Subject: [PATCH 069/104] Soften requirements for MemoryFlowChunk --- .../plugins/workflow/graphanalysis/MemoryFlowChunk.java | 7 +++++-- .../workflow/graphanalysis/StandardSimpleChunkVisitor.java | 2 -- 
.../plugins/workflow/graphanalysis/ForkScannerTest.java | 2 ++ 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java index ac5dee6e..08e698ed 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java @@ -47,7 +47,10 @@ public MemoryFlowChunk(@CheckForNull FlowNode before, @Nonnull FlowNode firstNod this.setNodeAfter(lastNode); } - @Nonnull + public MemoryFlowChunk() { + + } + @Override public FlowNode getFirstNode() { return firstNode; @@ -57,7 +60,7 @@ public void setFirstNode(FlowNode firstNode) { this.firstNode = firstNode; } - @Nonnull + @Override public FlowNode getLastNode() { return lastNode; diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardSimpleChunkVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardSimpleChunkVisitor.java index a8c90ec3..68bb870c 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardSimpleChunkVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardSimpleChunkVisitor.java @@ -6,8 +6,6 @@ import javax.annotation.CheckForNull; import javax.annotation.Nonnull; import java.util.ArrayDeque; -import java.util.Iterator; -import java.util.ListIterator; /** * Fairly straightforward implementation that will cover many cases. 
diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java index 4bd1ff03..c5afb114 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java @@ -381,11 +381,13 @@ public boolean apply(TestVisitor.CallEntry input) { // Start to end new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_END, 4, 13).assertEquals(visitor.calls.get(0)); + /* Tests for parallel handling // Start to end, in reverse order new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_BRANCH_END, 4, 12).assertEquals(visitor.calls.get(1)); new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_BRANCH_START, 4, 7).assertEquals(visitor.calls.get(2)); new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_BRANCH_END, 4, 9).assertEquals(visitor.calls.get(3)); new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_BRANCH_START, 4, 6).assertEquals(visitor.calls.get(4)); new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_START, 4, 6).assertEquals(visitor.calls.get(5)); + */ } } From 84656e5cf5bb8120587dc5c8f60582a04f9ad6d0 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Thu, 11 Aug 2016 11:18:55 -0400 Subject: [PATCH 070/104] Remove unused StandardSimpleChunkVisitor for now --- .../ParallelMemoryFlowChunk.java | 2 +- .../StandardSimpleChunkVisitor.java | 70 ------------------- 2 files changed, 1 insertion(+), 71 deletions(-) delete mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardSimpleChunkVisitor.java diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java index 8029e923..66ce728a 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java +++ 
b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java @@ -34,7 +34,7 @@ import java.util.Map; /** - * Corresponds to a parallel block, does some customization to compute the timing with parallel branches + * Corresponds to a parallel block, acts as an in-memory container that can plug into status/timing APIs * @author Sam Van Oort */ public class ParallelMemoryFlowChunk extends MemoryFlowChunk implements ParallelFlowChunk { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardSimpleChunkVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardSimpleChunkVisitor.java deleted file mode 100644 index 68bb870c..00000000 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardSimpleChunkVisitor.java +++ /dev/null @@ -1,70 +0,0 @@ -package org.jenkinsci.plugins.workflow.graphanalysis; - -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import org.jenkinsci.plugins.workflow.graph.FlowNode; - -import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; -import java.util.ArrayDeque; - -/** - * Fairly straightforward implementation that will cover many cases. - * To use it, extend it and invoke the parent methods while adding internal logic. - * Created by @author Sam Van Oort - */ -@SuppressFBWarnings -public class StandardSimpleChunkVisitor implements SimpleChunkVisitor { - - // FIXME: nice-to-have: track current parallel state so we can do pause timing for parallel branches. 
- - protected ArrayDeque currentChunks = new ArrayDeque(); - protected MemoryFlowChunk currentChunk; - - // Tracks parallel state, last item in currentChunks - protected ArrayDeque parallels = new ArrayDeque(); - - @Override - public void chunkStart(@Nonnull FlowNode startNode, @CheckForNull FlowNode beforeBlock, @Nonnull ForkScanner scanner) { - - } - - @Override - public void chunkEnd(@Nonnull FlowNode endNode, @CheckForNull FlowNode afterBlock, @Nonnull ForkScanner scanner) { - - } - - @Override - public void parallelStart(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchNode, @Nonnull ForkScanner scanner) { - /*if (parallels.size() > 0) { - Iterator it = parallels.iterator(); - while (it.hasNext()) { - ParallelMemoryFlowChunk p = it.next(); - if (p.getFirstNode() == parallelStartNode) { - it.remove(); - break; - } - } - }*/ - } - - @Override - public void parallelEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode parallelEndNode, @Nonnull ForkScanner scanner) { - /*ParallelMemoryFlowChunk chunk = new ParallelMemoryFlowChunk(parallelStartNode, parallelEndNode); - parallels.push(chunk);*/ - } - - @Override - public void parallelBranchStart(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchStartNode, @Nonnull ForkScanner scanner) { - // TODO handle me - } - - @Override - public void parallelBranchEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchEndNode, @Nonnull ForkScanner scanner) { - // TOOD do stuff with me - } - - @Override - public void atomNode(@CheckForNull FlowNode before, @Nonnull FlowNode atomNode, @CheckForNull FlowNode after, @Nonnull ForkScanner scan) { - - } -} From 55dc020d06e950e22dc7b126546f674d48ede562 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Thu, 11 Aug 2016 21:13:58 -0400 Subject: [PATCH 071/104] Add pause timing to MemoryFlowChunk --- .../plugins/workflow/graphanalysis/MemoryFlowChunk.java | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git 
a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java index 08e698ed..20b4c3b8 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java @@ -39,6 +39,7 @@ public class MemoryFlowChunk implements FlowChunkWithContext { protected FlowNode lastNode; protected FlowNode nodeBefore; protected FlowNode nodeAfter; + private long pauseTimeMillis = -1; public MemoryFlowChunk(@CheckForNull FlowNode before, @Nonnull FlowNode firstNode, @Nonnull FlowNode lastNode, @CheckForNull FlowNode nodeAfter) { this.setNodeBefore(before); @@ -87,4 +88,12 @@ public FlowNode getNodeAfter() { public void setNodeAfter(FlowNode nodeAfter) { this.nodeAfter = nodeAfter; } + + public long getPauseTimeMillis() { + return pauseTimeMillis; + } + + public void setPauseTimeMillis(long pauseTimeMillis) { + this.pauseTimeMillis = pauseTimeMillis; + } } From 4af21f5e8a6ca4b419bc5d6108db082360f1dfec Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Thu, 11 Aug 2016 22:08:11 -0400 Subject: [PATCH 072/104] Fixes and test fixes for simple visitor parallels --- .../workflow/graphanalysis/ForkScanner.java | 8 ++++++-- .../graphanalysis/ForkScannerTest.java | 18 ++++++++++-------- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index 907828a2..f941084b 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -539,8 +539,10 @@ public void visitSimpleChunks(SimpleChunkVisitor visitor, ChunkFinder finder) { // Trigger on parallels switch (currentType) { + case NORMAL: + break; case PARALLEL_END: - 
visitor.parallelEnd(this.currentParallelStartNode, prev, this); + visitor.parallelEnd(this.currentParallelStartNode, myCurrent, this); break; case PARALLEL_START: visitor.parallelStart(myCurrent, prev, this); @@ -549,7 +551,9 @@ public void visitSimpleChunks(SimpleChunkVisitor visitor, ChunkFinder finder) { visitor.parallelBranchEnd(this.currentParallelStartNode, myCurrent, this); break; case PARALLEL_BRANCH_START: - visitor.parallelBranchStart(this.currentParallelStartNode, myCurrent, this); + // Needed because once we hit the start of the last branch, the next node is our currentParallelStart + FlowNode parallelStart = (nextType == NodeType.PARALLEL_START) ? myNext : this.currentParallelStartNode; + visitor.parallelBranchStart(parallelStart, myCurrent, this); break; default: break; diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java index c5afb114..247d8e3b 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java @@ -379,15 +379,17 @@ public boolean apply(TestVisitor.CallEntry input) { })); Assert.assertEquals(6, parallelCalls.size()); // Start to end - new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_END, 4, 13).assertEquals(visitor.calls.get(0)); + new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_END, 4, 13).assertEquals(parallelCalls.get(0)); - /* Tests for parallel handling + //Tests for parallel handling // Start to end, in reverse order - new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_BRANCH_END, 4, 12).assertEquals(visitor.calls.get(1)); - new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_BRANCH_START, 4, 7).assertEquals(visitor.calls.get(2)); - new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_BRANCH_END, 4, 9).assertEquals(visitor.calls.get(3)); - new 
TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_BRANCH_START, 4, 6).assertEquals(visitor.calls.get(4)); - new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_START, 4, 6).assertEquals(visitor.calls.get(5)); - */ + + new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_BRANCH_END, 4, 9).assertEquals(parallelCalls.get(1)); + new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_BRANCH_START, 4, 6).assertEquals(parallelCalls.get(2)); + new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_BRANCH_END, 4, 12).assertEquals(parallelCalls.get(3)); + + new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_BRANCH_START, 4, 7).assertEquals(parallelCalls.get(4)); + new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_START, 4, 7).assertEquals(parallelCalls.get(5)); + } } From 16271559a188b8a0ced0969d3d41a7901d671a03 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Thu, 11 Aug 2016 22:16:54 -0400 Subject: [PATCH 073/104] Add standard visitor and convenience methods for forkscanner/simpleChunkVisitor --- .../workflow/graphanalysis/ForkScanner.java | 26 +++++++- .../graphanalysis/StandardChunkVisitor.java | 63 +++++++++++++++++++ 2 files changed, 88 insertions(+), 1 deletion(-) create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardChunkVisitor.java diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index f941084b..10f29154 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -96,6 +96,18 @@ public enum NodeType { protected NodeType currentType; protected NodeType nextType; + public ForkScanner() { + + } + + public ForkScanner(@Nonnull Collection heads) { + this.setup(heads); + } + + public ForkScanner(@Nonnull Collection heads, @Nonnull Collection blackList) { + this.setup(heads, blackList); + } 
+ @Override protected void reset() { parallelBlockStartStack.clear(); @@ -514,8 +526,20 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection return output; } + public static void visitSimpleChunks(@Nonnull Collection heads, @Nonnull Collection blacklist, @Nonnull SimpleChunkVisitor visitor, @Nonnull ChunkFinder finder) { + ForkScanner scanner = new ForkScanner(); + scanner.setup(heads, blacklist); + scanner.visitSimpleChunks(visitor, finder); + } + + public static void visitSimpleChunks(@Nonnull Collection heads, @Nonnull SimpleChunkVisitor visitor, @Nonnull ChunkFinder finder) { + ForkScanner scanner = new ForkScanner(); + scanner.setup(heads); + scanner.visitSimpleChunks(visitor, finder); + } + /** Walk through flows */ - public void visitSimpleChunks(SimpleChunkVisitor visitor, ChunkFinder finder) { + public void visitSimpleChunks(@Nonnull SimpleChunkVisitor visitor, @Nonnull ChunkFinder finder) { FlowNode prev = null; if (finder.isStartInsideChunk() && hasNext()) { visitor.chunkEnd(this.myNext, null, this); diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardChunkVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardChunkVisitor.java new file mode 100644 index 00000000..c82800fa --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardChunkVisitor.java @@ -0,0 +1,63 @@ +package org.jenkinsci.plugins.workflow.graphanalysis; + +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +import javax.annotation.CheckForNull; +import javax.annotation.Nonnull; + +/** + * Simple handler for linear chunks (basic stages, etc), designed to be extended + * Note: does not handle parallels or nesting + * Extend {@link #handleChunkDone(MemoryFlowChunk)} to gather up final chunks + * Extend {@link #atomNode(FlowNode, FlowNode, FlowNode, ForkScanner)} to gather data about nodes in a chunk + * @author Sam Van Oort + */ +public class StandardChunkVisitor implements 
SimpleChunkVisitor { + + protected MemoryFlowChunk chunk = new MemoryFlowChunk(); + + + /** Override me to do something once the chunk is finished + * Note: the chunk will be mutated directly, so you need to copy it if you want to do something + */ + protected void handleChunkDone(@Nonnull MemoryFlowChunk chunk) { + // NO-OP initially + } + + protected void resetChunk(@Nonnull MemoryFlowChunk chunk) { + chunk.setFirstNode(null); + chunk.setLastNode(null); + chunk.setNodeBefore(null); + chunk.setNodeAfter(null); + } + + @Override + public void chunkStart(@Nonnull FlowNode startNode, @CheckForNull FlowNode beforeBlock, @Nonnull ForkScanner scanner) { + chunk.setNodeBefore(beforeBlock); + chunk.setFirstNode(startNode); + handleChunkDone(chunk); + resetChunk(chunk); + } + + @Override + public void chunkEnd(@Nonnull FlowNode endNode, @CheckForNull FlowNode afterChunk, @Nonnull ForkScanner scanner) { + chunk.setLastNode(endNode); + chunk.setNodeAfter(afterChunk); + } + + @Override + public void parallelStart(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchNode, @Nonnull ForkScanner scanner) {} + + @Override + public void parallelEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode parallelEndNode, @Nonnull ForkScanner scanner) {} + + @Override + public void parallelBranchStart(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchStartNode, @Nonnull ForkScanner scanner) {} + + @Override + public void parallelBranchEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchEndNode, @Nonnull ForkScanner scanner) {} + + /** Extend me to do something with nodes inside a chunk */ + @Override + public void atomNode(@CheckForNull FlowNode before, @Nonnull FlowNode atomNode, @CheckForNull FlowNode after, @Nonnull ForkScanner scan) {} +} From e1988cd14ac74d9823255b1ce527afdabab71ace Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Fri, 12 Aug 2016 10:19:04 -0400 Subject: [PATCH 074/104] Cleanup for reviews --- 
.../workflow/graphanalysis/FlowChunk.java | 6 ++--- .../graphanalysis/FlowChunkWithContext.java | 4 +-- .../graphanalysis/FlowNodeVisitor.java | 2 +- .../graphanalysis/ParallelFlowChunk.java | 4 +-- .../graphanalysis/SimpleChunkVisitor.java | 25 +++++++++++++------ 5 files changed, 24 insertions(+), 17 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java index b0951ead..6debf380 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java @@ -24,10 +24,8 @@ package org.jenkinsci.plugins.workflow.graphanalysis; -import org.jenkinsci.plugins.workflow.flow.FlowExecution; import org.jenkinsci.plugins.workflow.graph.FlowNode; -import javax.annotation.CheckForNull; import javax.annotation.Nonnull; /** @@ -48,8 +46,8 @@ */ public interface FlowChunk { @Nonnull - public FlowNode getFirstNode(); + FlowNode getFirstNode(); @Nonnull - public FlowNode getLastNode(); + FlowNode getLastNode(); } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkWithContext.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkWithContext.java index d6527989..17d8383b 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkWithContext.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkWithContext.java @@ -9,9 +9,9 @@ public interface FlowChunkWithContext extends FlowChunk { /** Return the node before this chunk, or null if it is the end */ @CheckForNull - public FlowNode getNodeBefore(); + FlowNode getNodeBefore(); /** Return the node after this chunk, or null if it is the end */ @CheckForNull - public FlowNode getNodeAfter(); + FlowNode getNodeAfter(); } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java 
b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java index b869c9b5..0249459b 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java @@ -42,5 +42,5 @@ public interface FlowNodeVisitor { * @param f Node to visit * @return False if we should stop visiting nodes */ - public boolean visit(@Nonnull FlowNode f); + boolean visit(@Nonnull FlowNode f); } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelFlowChunk.java index ae08052b..024d7a5c 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelFlowChunk.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelFlowChunk.java @@ -10,8 +10,8 @@ public interface ParallelFlowChunk extends FlowChu /** Returns the branches of a parallel flow chunk, mapped by branch name and parallel branch block */ @Nonnull - public Map getBranches(); + Map getBranches(); @Nonnull - public void setBranch(@Nonnull String branchName, @Nonnull ChunkType branchBlock); + void setBranch(@Nonnull String branchName, @Nonnull ChunkType branchBlock); } diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java index 8419b6aa..a98bc19c 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java @@ -35,10 +35,11 @@ *

    Implementations get to decide how to use & handle chunks. *

    At a minimum they should handle:

    *
      - *
    • Unbalanced numbers of chunk start/end calls
    • + *
    • Unbalanced numbers of chunk start/end calls (for incomplete flows)
    • *
    • A chunk end with no beginning (runs to start of flow, or never began)
    • *
    • A chunk start with no end (ex: a block that hasn't completed running)
    • - *
    • Other starts/ends before we hit the closing one
    • + *
    • Other starts/ends before we hit the closing one (nesting)
    • + *
    • Atom nodes not within the current Chunk (visitor is responsible for handling state)
    • *
    * * Important implementation note: multiple callbacks can be invoked for a single node depending on its type. May not be invoked if we're inside an in-progress parallel + * @param parallelStartNode First node of parallel (BlockStartNode before the branches) + * @param branchEndNode Final node of the branch (may be BlockEndNode if done, otherwise just the last one executed) + * @param scanner + */ void parallelBranchEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode branchEndNode, @Nonnull ForkScanner scanner); /** - * Called for a flownode within the chunk that is neither start nor end. + * Called for a flownode neither start nor end. * Ways you may want to use this: accumulate pause time, collect errors, etc. + * Note: invocations don't guarantee whether or not you're within a marked chunk. * @param before Node before the current * @param atomNode The node itself * @param after Node after the current From a53b13e7d2ed9dce170e0b6b3b35bff8e4ceb350 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Fri, 12 Aug 2016 11:15:12 -0400 Subject: [PATCH 075/104] Remove dangling file --- .gitignore.orig | 33 --------------------------------- 1 file changed, 33 deletions(-) delete mode 100644 .gitignore.orig diff --git a/.gitignore.orig b/.gitignore.orig deleted file mode 100644 index 1f64f169..00000000 --- a/.gitignore.orig +++ /dev/null @@ -1,33 +0,0 @@ -target -work -<<<<<<< HEAD - -# IntelliJ project files -*.iml -*.iws -*.ipr -.idea -out - -# eclipse project file -.settings -.classpath -.project -build - -# vim -*~ -*.swp - -# ctags -tags - -# OS X -.DS_Store - -# mvn versions:set -pom.xml.versionsBackup -======= -.idea -*.iml ->>>>>>> master From aa18ee5127f298c23376614dbf085b5e96ca67dc Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Fri, 12 Aug 2016 12:26:20 -0400 Subject: [PATCH 076/104] Oh yeah I meant to change the versioning --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 747884ce..6e6a16d9 100644 
--- a/pom.xml +++ b/pom.xml @@ -33,7 +33,7 @@ org.jenkins-ci.plugins.workflow workflow-api - 2.2.blockapis-SNAPSHOT + 2.2-blockapis-SNAPSHOT hpi Pipeline: API https://wiki.jenkins-ci.org/display/JENKINS/Pipeline+API+Plugin From f22aba60b62e175be4f0f093c0bf8b1ca9ff0e4b Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Fri, 12 Aug 2016 14:43:13 -0400 Subject: [PATCH 077/104] Better reset the pause time for StandardChunkVisitor --- .../plugins/workflow/graphanalysis/MemoryFlowChunk.java | 2 +- .../plugins/workflow/graphanalysis/StandardChunkVisitor.java | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java index 20b4c3b8..6c3ed8c8 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java @@ -39,7 +39,7 @@ public class MemoryFlowChunk implements FlowChunkWithContext { protected FlowNode lastNode; protected FlowNode nodeBefore; protected FlowNode nodeAfter; - private long pauseTimeMillis = -1; + private long pauseTimeMillis = 0; public MemoryFlowChunk(@CheckForNull FlowNode before, @Nonnull FlowNode firstNode, @Nonnull FlowNode lastNode, @CheckForNull FlowNode nodeAfter) { this.setNodeBefore(before); diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardChunkVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardChunkVisitor.java index c82800fa..f5a1add2 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardChunkVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardChunkVisitor.java @@ -29,6 +29,7 @@ protected void resetChunk(@Nonnull MemoryFlowChunk chunk) { chunk.setLastNode(null); chunk.setNodeBefore(null); chunk.setNodeAfter(null); + chunk.setPauseTimeMillis(0); } 
@Override From 9880dc876df76d31d472aee159ec24c895d4b887 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Fri, 12 Aug 2016 15:03:48 -0400 Subject: [PATCH 078/104] Rename finder and add the stagefinder that I removed earlier --- ...elFinder.java => LabelledChunkFinder.java} | 2 +- .../graphanalysis/StageChunkFinder.java | 21 +++++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) rename src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/{LabelFinder.java => LabelledChunkFinder.java} (95%) create mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StageChunkFinder.java diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelFinder.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelledChunkFinder.java similarity index 95% rename from src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelFinder.java rename to src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelledChunkFinder.java index e1ae2a1c..db4443da 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelFinder.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelledChunkFinder.java @@ -13,7 +13,7 @@ * Creates chunks whenever you have a labelled linear block (not a parallel branch). 
* Created by @author Sam Van Oort */ -public class LabelFinder implements ChunkFinder { +public class LabelledChunkFinder implements ChunkFinder { public boolean isStartInsideChunk() { return true; diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StageChunkFinder.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StageChunkFinder.java new file mode 100644 index 00000000..4d737367 --- /dev/null +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StageChunkFinder.java @@ -0,0 +1,21 @@ +package org.jenkinsci.plugins.workflow.graphanalysis; + +import org.jenkinsci.plugins.workflow.actions.LabelAction; +import org.jenkinsci.plugins.workflow.actions.ThreadNameAction; +import org.jenkinsci.plugins.workflow.graph.FlowNode; + +import javax.annotation.CheckForNull; +import javax.annotation.Nonnull; + +/** + * Finds stages (legacy or block-scoped) + * @author Sam Van Oort + */ +public class StageChunkFinder extends LabelledChunkFinder { + @Override + public boolean isChunkStart(@Nonnull FlowNode current, @CheckForNull FlowNode previous) { + LabelAction la = current.getAction(LabelAction.class); + return la != null && !(la instanceof ThreadNameAction); // Filters out parallels + } + +} From 700d009c23521a46c2be10c4bede36eef152034e Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Fri, 12 Aug 2016 15:23:48 -0400 Subject: [PATCH 079/104] Remove unused BlockVisitor from early prototypes --- .../workflow/graphanalysis/BlockVisitor.java | 129 ------------------ 1 file changed, 129 deletions(-) delete mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/BlockVisitor.java diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/BlockVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/BlockVisitor.java deleted file mode 100644 index 514d69f6..00000000 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/BlockVisitor.java +++ /dev/null @@ -1,129 +0,0 @@ -package 
org.jenkinsci.plugins.workflow.graphanalysis; - -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import org.jenkinsci.plugins.workflow.graph.BlockEndNode; -import org.jenkinsci.plugins.workflow.graph.BlockStartNode; -import org.jenkinsci.plugins.workflow.graph.FlowEndNode; -import org.jenkinsci.plugins.workflow.graph.FlowNode; - -import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; -import java.util.ArrayDeque; - -/** - * Visitor that stores a list of block scopes - * This MUST be coupled with the ForkScanner to work correctly because of its iteration order - * This is because it guarantees block-scoped traversal, where every end occurs before a start - * Created by svanoort on 5/12/16. - */ -@SuppressFBWarnings -public class BlockVisitor implements FlowNodeVisitor { - - protected ArrayDeque scopes = new ArrayDeque(); - protected IdFlowBlock currentBlock = new IdFlowBlock(); - - public static interface FlowBlock { - @CheckForNull - public String getBlockStartNodeId(); - - @CheckForNull - public String getBlockEndNodeId(); - - @CheckForNull - public String getFirstChildId(); - - @CheckForNull - public String getLastChildId(); - } - - public class IdFlowBlock implements FlowBlock { - private String blockStartNodeId; - private String blockEndNodeId; - private String firstChildId; - private String lastChildId; - - public String getBlockStartNodeId() { - return blockStartNodeId; - } - - public void setBlockStartNodeId(String blockStartNodeId) { - this.blockStartNodeId = blockStartNodeId; - } - - public String getBlockEndNodeId() { - return blockEndNodeId; - } - - public void setBlockEndNodeId(String blockEndNodeId) { - this.blockEndNodeId = blockEndNodeId; - } - - public String getFirstChildId() { - return firstChildId; - } - - public void setFirstChildId(String firstChildId) { - this.firstChildId = firstChildId; - } - - public String getLastChildId() { - return lastChildId; - } - - public void setLastChildId(String lastChildId) { - 
this.lastChildId = lastChildId; - } - } - - // Block is closed, we pop it off the scope and do what we want with it - protected void popBlock() { - this.currentBlock = this.scopes.pop(); - } - - /** - * Enter a new block scope - * @param block Block that starts then new scope - */ - protected void pushBlock(@Nonnull IdFlowBlock block) { - this.scopes.push(this.currentBlock); - this.currentBlock = block; - } - - protected void addBlockChild(@Nonnull FlowNode f) { - if (currentBlock.getLastChildId() != null) { - currentBlock.setLastChildId(f.getId()); - } - currentBlock.setFirstChildId(f.getId()); - } - - /** - * Visit the flow node, and indicate if we should continue analysis - * - * @param f Node to visit - * @return False if we should stop visiting nodes - */ - public boolean visit(@Nonnull FlowNode f) { - if (f instanceof BlockEndNode) { - IdFlowBlock innerBlock = new IdFlowBlock(); - innerBlock.setBlockEndNodeId(f.getId()); - innerBlock.setBlockStartNodeId(((BlockEndNode) f).getId()); - pushBlock(innerBlock); - } else if (f instanceof BlockStartNode) { - String currentStartId = currentBlock.getBlockStartNodeId(); - if (currentStartId != null && currentBlock.getBlockStartNodeId() != null - && (currentStartId.equals(f.getId())) ) { - // We're done with this block's scope, move up one level - popBlock(); - } else { - // We're inside an unterminated block, add an empty block scope above it to contain it and pop off the current block - IdFlowBlock block = new IdFlowBlock(); - currentBlock.setBlockStartNodeId(f.getId()); - scopes.offer(new IdFlowBlock()); - popBlock(); - } - } else { // We're inside the current block - addBlockChild(f); - } - return true; - } -} From 586979c6f16dc9588565e1ffbdce612ae7f9a442 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Fri, 12 Aug 2016 15:56:48 -0400 Subject: [PATCH 080/104] Fix minor oopsy --- .../plugins/workflow/graphanalysis/LabelledChunkFinder.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelledChunkFinder.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelledChunkFinder.java index db4443da..9a84fe29 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelledChunkFinder.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelledChunkFinder.java @@ -32,7 +32,7 @@ public boolean isChunkEnd(@Nonnull FlowNode current, @CheckForNull FlowNode prev return false; } if (current instanceof BlockEndNode) { - BlockStartNode bsn = ((BlockEndNode) previous).getStartNode(); + BlockStartNode bsn = ((BlockEndNode) current).getStartNode(); if (isChunkStart(bsn, null)) { return true; } From 7bb581c6fa7d7cf6ea9c80d5852266dc2587b8ce Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Mon, 15 Aug 2016 16:17:16 -0400 Subject: [PATCH 081/104] Remove StageChunkFinder because BlockScopedStages need some additional deps to handle fully --- .../graphanalysis/StageChunkFinder.java | 21 ------------------- 1 file changed, 21 deletions(-) delete mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StageChunkFinder.java diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StageChunkFinder.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StageChunkFinder.java deleted file mode 100644 index 4d737367..00000000 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StageChunkFinder.java +++ /dev/null @@ -1,21 +0,0 @@ -package org.jenkinsci.plugins.workflow.graphanalysis; - -import org.jenkinsci.plugins.workflow.actions.LabelAction; -import org.jenkinsci.plugins.workflow.actions.ThreadNameAction; -import org.jenkinsci.plugins.workflow.graph.FlowNode; - -import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; - -/** - * Finds stages (legacy or block-scoped) - * @author Sam Van Oort - */ -public class StageChunkFinder extends LabelledChunkFinder { - @Override - public boolean 
isChunkStart(@Nonnull FlowNode current, @CheckForNull FlowNode previous) { - LabelAction la = current.getAction(LabelAction.class); - return la != null && !(la instanceof ThreadNameAction); // Filters out parallels - } - -} From 7b9facaee17d5a3aae02fe4e2f393928f6e53761 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 17 Aug 2016 15:35:45 -0400 Subject: [PATCH 082/104] Cleanup author annotation misformatting for javadoc --- .../plugins/workflow/graphanalysis/AbstractFlowScanner.java | 2 +- .../plugins/workflow/graphanalysis/BlockChunkFinder.java | 2 +- .../jenkinsci/plugins/workflow/graphanalysis/ChunkFinder.java | 2 +- .../plugins/workflow/graphanalysis/DepthFirstScanner.java | 2 +- .../jenkinsci/plugins/workflow/graphanalysis/Filterator.java | 2 +- .../plugins/workflow/graphanalysis/FilteratorImpl.java | 2 +- .../org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java | 2 +- .../plugins/workflow/graphanalysis/FlowNodeVisitor.java | 2 +- .../plugins/workflow/graphanalysis/FlowScanningUtils.java | 2 +- .../jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java | 2 +- .../plugins/workflow/graphanalysis/LabelledChunkFinder.java | 2 +- .../workflow/graphanalysis/LinearBlockHoppingScanner.java | 2 +- .../jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java | 2 +- .../plugins/workflow/graphanalysis/MemoryFlowChunk.java | 2 +- .../plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java | 2 +- .../plugins/workflow/graphanalysis/SimpleChunkVisitor.java | 2 +- .../plugins/workflow/graphanalysis/StandardChunkVisitor.java | 2 +- .../plugins/workflow/graphanalysis/FlowScannerTest.java | 2 +- .../jenkinsci/plugins/workflow/graphanalysis/FlowTestUtils.java | 2 +- 19 files changed, 19 insertions(+), 19 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java index c86604a8..6a5057ca 100644 --- 
a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java @@ -86,7 +86,7 @@ * * * - * @author Sam Van Oort + * @author Sam Van Oort */ public abstract class AbstractFlowScanner implements Iterable , Filterator { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/BlockChunkFinder.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/BlockChunkFinder.java index d739b59c..d7bbe21d 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/BlockChunkFinder.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/BlockChunkFinder.java @@ -9,7 +9,7 @@ /** * Matches start and end of a block. Any block - * Created by @author Sam Van Oort + * @author Sam Van Oort */ public class BlockChunkFinder implements ChunkFinder { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ChunkFinder.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ChunkFinder.java index 894e5cd1..286626a0 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ChunkFinder.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ChunkFinder.java @@ -7,7 +7,7 @@ /** * Matches the start and end of a chunk - * Created by @author Sam Van Oort + * @author Sam Van Oort */ public interface ChunkFinder { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java index bfaeb43f..1f3ac415 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java @@ -40,7 +40,7 @@ * With parallel branches, the first branch is explored, then remaining branches are explored in reverse order. * *

    The behavior is analogous to {@link org.jenkinsci.plugins.workflow.graph.FlowGraphWalker} but faster. - * @author Sam Van Oort + * @author Sam Van Oort */ public class DepthFirstScanner extends AbstractFlowScanner { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java index 48289da4..b20394af 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java @@ -34,7 +34,7 @@ *

    As a rule, assume that returned Filterators wrap an iterator and pass calls to it. * Thus the iterator position will change if next() is called on the filtered versions. * Note also: you may filter a filterator, if needed. - * @author Sam Van Oort + * @author Sam Van Oort */ public interface Filterator extends Iterator { /** Returns a filtered view of the iterator, which calls the iterator until matches are found */ diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java index 6ba13c6c..27b8fd1e 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java @@ -30,7 +30,7 @@ import java.util.Iterator; /** Filters an iterator against a match predicate by wrapping an iterator - * @author Sam Van Oort + * @author Sam Van Oort */ class FilteratorImpl implements Filterator { private boolean hasNext = false; diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java index 6debf380..58ff90ff 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java @@ -42,7 +42,7 @@ *

  • A mix of types in sequence, such as nested structures
  • * * - * @author Sam Van Oort + * @author Sam Van Oort */ public interface FlowChunk { @Nonnull diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java index 0249459b..0ff92cc1 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java @@ -33,7 +33,7 @@ * Interface used when examining a pipeline FlowNode graph node by node, and terminating when a condition is met * *

    This is intended to couple with {@link AbstractFlowScanner#visitAll(Collection, FlowNodeVisitor)} - * @author Sam Van Oort + * @author Sam Van Oort */ public interface FlowNodeVisitor { /** diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java index 3ccee1d8..2b2c9058 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java @@ -45,7 +45,7 @@ /** * Library of common functionality when analyzing/walking flow graphs - * @author Sam Van Oort + * @author Sam Van Oort */ public final class FlowScanningUtils { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index 10f29154..d8dbe8f4 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -62,7 +62,7 @@ *

  • Branch information is available for use here
  • * * - * @author Sam Van Oort + * @author Sam Van Oort */ public class ForkScanner extends AbstractFlowScanner { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelledChunkFinder.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelledChunkFinder.java index 9a84fe29..aeae680d 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelledChunkFinder.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelledChunkFinder.java @@ -11,7 +11,7 @@ /** * Basically finds stages. *Technically* it's any block of nodes. * Creates chunks whenever you have a labelled linear block (not a parallel branch). - * Created by @author Sam Van Oort + * @author Sam Van Oort */ public class LabelledChunkFinder implements ChunkFinder { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java index 9f385045..12b695ff 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java @@ -52,7 +52,7 @@ *
  • Locating the label applying to a given FlowNode (if any)
  • * * - * @author Sam Van Oort + * @author Sam Van Oort */ public class LinearBlockHoppingScanner extends LinearScanner { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java index 8f273501..49da9ae1 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java @@ -40,7 +40,7 @@ *

    Use case: we don't care about parallel branches or know they don't exist, we just want to walk through the top-level blocks. * *

    This is the fastest & simplest way to walk a flow, because you only care about a single node at a time. - * @author Sam Van Oort + * @author Sam Van Oort */ public class LinearScanner extends AbstractFlowScanner { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java index 6c3ed8c8..16d1410e 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java @@ -32,7 +32,7 @@ /** * FlowChunk that holds direct references to the {@link FlowNode} instances and context info * This makes it easy to use in analysis and visualizations, but inappropriate to retain in caches, etc - * @author Sam Van Oort + * @author Sam Van Oort */ public class MemoryFlowChunk implements FlowChunkWithContext { protected FlowNode firstNode; diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java index 66ce728a..9a136ade 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ParallelMemoryFlowChunk.java @@ -35,7 +35,7 @@ /** * Corresponds to a parallel block, acts as an in-memory container that can plug into status/timing APIs - * @author Sam Van Oort + * @author Sam Van Oort */ public class ParallelMemoryFlowChunk extends MemoryFlowChunk implements ParallelFlowChunk { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java index a98bc19c..7c5dd53e 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java +++ 
b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java @@ -53,7 +53,7 @@ *

  • All the parallel methods are used to report on parallel status - helpful when we need to deal with parallels internal to chunks.
  • * * - * @author Sam Van Oort + * @author Sam Van Oort */ public interface SimpleChunkVisitor { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardChunkVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardChunkVisitor.java index f5a1add2..3b11ef16 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardChunkVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardChunkVisitor.java @@ -10,7 +10,7 @@ * Note: does not handle parallels or nesting * Extend {@link #handleChunkDone(MemoryFlowChunk)} to gather up final chunks * Extend {@link #atomNode(FlowNode, FlowNode, FlowNode, ForkScanner)} to gather data about nodes in a chunk - * @author Sam Van Oort + * @author Sam Van Oort */ public class StandardChunkVisitor implements SimpleChunkVisitor { diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java index 4cf588bf..fea95b61 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java @@ -53,7 +53,7 @@ /** * Tests for all the core parts of graph analysis except the ForkScanner, internals which is complex enough to merit its own tests - * @author Sam Van Oort + * @author Sam Van Oort */ public class FlowScannerTest { diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowTestUtils.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowTestUtils.java index 6b4984d6..16bcbb0c 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowTestUtils.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowTestUtils.java @@ -36,7 +36,7 @@ /** * Utilities for testing flow scanning - * @author Sam Van Oort + * @author Sam Van Oort */ public class FlowTestUtils { public 
static Predicate predicateMatchStepDescriptor(@Nonnull final String descriptorId) { From ae47cc2bad297880a0838bb8de9edbb1c94f1d9a Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 17 Aug 2016 15:51:07 -0400 Subject: [PATCH 083/104] Make all paragraph tags in javadocs not self-closing to placate javadocs --- .../plugins/workflow/actions/FlowNodeAction.java | 2 +- .../workflow/graphanalysis/AbstractFlowScanner.java | 10 +++++----- .../plugins/workflow/graphanalysis/ChunkFinder.java | 4 ++-- .../workflow/graphanalysis/DepthFirstScanner.java | 4 ++-- .../plugins/workflow/graphanalysis/Filterator.java | 2 +- .../plugins/workflow/graphanalysis/FlowChunk.java | 4 ++-- .../workflow/graphanalysis/FlowNodeVisitor.java | 2 +- .../plugins/workflow/graphanalysis/ForkScanner.java | 10 +++++----- .../graphanalysis/LinearBlockHoppingScanner.java | 4 ++-- .../workflow/graphanalysis/LinearScanner.java | 6 +++--- .../workflow/graphanalysis/SimpleChunkVisitor.java | 12 ++++++------ .../plugins/workflow/graphanalysis/package-info.java | 4 ++-- 12 files changed, 32 insertions(+), 32 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/actions/FlowNodeAction.java b/src/main/java/org/jenkinsci/plugins/workflow/actions/FlowNodeAction.java index 0b0a55f7..ebfa69a3 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/actions/FlowNodeAction.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/actions/FlowNodeAction.java @@ -42,7 +42,7 @@ public interface FlowNodeAction extends Action { * Called by {@link FlowExecution#loadActions(FlowNode)} when * actions get loaded from persistent storage. * - *

    + *

    * This is more of an internal API between {@link FlowNode} and * {@link FlowExecution}. Not allowed to be called from outside. */ diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java index 6a5057ca..e5a1e11b 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java @@ -43,9 +43,9 @@ /** * Core APIs and base logic for FlowScanners that extract information from a pipeline execution. * - *

    These iterate through the directed acyclic graph (DAG) or "flow graph" of {@link FlowNode}s produced when a pipeline runs. + *

    These iterate through the directed acyclic graph (DAG) or "flow graph" of {@link FlowNode}s produced when a pipeline runs. * - *

    This provides 6 base APIs to use, in decreasing expressiveness and increasing genericity: + *

    This provides 6 base APIs to use, in decreasing expressiveness and increasing genericity: *
      * - {@link #findFirstMatch(Collection, Collection, Predicate)}: find the first FlowNode matching predicate condition. * - {@link #filteredNodes(Collection, Collection, Predicate)}: return the collection of FlowNodes matching the predicate. @@ -56,11 +56,11 @@ * - Iterable: for syntactic sugar, FlowScanners implement Iterable to allow use in for-each loops once initialized. *
    * - *

    All APIs visit the parent nodes, walking backward from heads(inclusive) until they they hit {@link #myBlackList} nodes (exclusive) or reach the end of the DAG. + *

    All APIs visit the parent nodes, walking backward from heads(inclusive) until they they hit {@link #myBlackList} nodes (exclusive) or reach the end of the DAG. * If blackList nodes are an empty collection or null, APIs will walk to the beginning of the FlowGraph. * Multiple blackList nodes are helpful for putting separate bounds on walking different parallel branches. * - *

    Key Points: + *

    Key Points: *
  • There are many helper methods offering syntactic sugar for the above APIs in common use cases (simpler method signatures).
  • *
  • Each implementation provides its own iteration order (described in its javadoc comments), * but it is generally unsafe to rely on parallel branches being visited in a specific order.
  • @@ -74,7 +74,7 @@ *
  • Allows for caching to be added inside a FlowScanner if desired, but caching is only useful when reused.
  • * * - *

    Suggested uses: + *

    Suggested uses: *
      *
    • Implement a {@link FlowNodeVisitor} that collects metrics from each FlowNode visited, and call visitAll to extract the data.
    • *
    • Find all flownodes of a given type (ex: stages), using {@link #filteredNodes(Collection, Collection, Predicate)}
    • diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ChunkFinder.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ChunkFinder.java index 286626a0..3c079ae6 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ChunkFinder.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ChunkFinder.java @@ -25,8 +25,8 @@ public interface ChunkFinder { /** * Test if the current node is the end of a chunk (inclusive) * @param current Node to test for being end - *

      For a block, the {@link org.jenkinsci.plugins.workflow.graph.BlockEndNode} - *

      For a legacy stage or marker, this will be first node of new stage (previous is the marker) + *

      For a block, the {@link org.jenkinsci.plugins.workflow.graph.BlockEndNode} + *

      For a legacy stage or marker, this will be first node of new stage (previous is the marker) * @param previous Previous node, to use in testing chunk * @return True if current is the end of a chunk (inclusive) */ diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java index 1f3ac415..8b8eebec 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java @@ -36,10 +36,10 @@ /** Does a simple and somewhat efficient depth-first search of all FlowNodes in the DAG. * - *

      Iteration order: depth-first search, revisiting parallel branches once done. + *

      Iteration order: depth-first search, revisiting parallel branches once done. * With parallel branches, the first branch is explored, then remaining branches are explored in reverse order. * - *

      The behavior is analogous to {@link org.jenkinsci.plugins.workflow.graph.FlowGraphWalker} but faster. + *

      The behavior is analogous to {@link org.jenkinsci.plugins.workflow.graph.FlowGraphWalker} but faster. * @author Sam Van Oort */ public class DepthFirstScanner extends AbstractFlowScanner { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java index b20394af..fed4dafc 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java @@ -31,7 +31,7 @@ /** Iterator that may be navigated through a filtered wrapper. * - *

      As a rule, assume that returned Filterators wrap an iterator and pass calls to it. + *

      As a rule, assume that returned Filterators wrap an iterator and pass calls to it. * Thus the iterator position will change if next() is called on the filtered versions. * Note also: you may filter a filterator, if needed. * @author Sam Van Oort diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java index 58ff90ff..453de963 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java @@ -30,9 +30,9 @@ /** * Common container interface for a series of {@link FlowNode}s with a logical start and end. - *

      We use this because every plugin has a different way of storing info about the nodes. + *

      We use this because every plugin has a different way of storing info about the nodes. * - *

      Common uses: + *

      Common uses: *
        *
      • A single FlowNode (when coupling with timing/status APIs)
      • *
      • A block (with a {@link org.jenkinsci.plugins.workflow.graph.BlockStartNode} and {@link org.jenkinsci.plugins.workflow.graph.BlockEndNode})
      • diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java index 0ff92cc1..3fd5a5e8 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java @@ -32,7 +32,7 @@ /** * Interface used when examining a pipeline FlowNode graph node by node, and terminating when a condition is met * - *

        This is intended to couple with {@link AbstractFlowScanner#visitAll(Collection, FlowNodeVisitor)} + *

        This is intended to couple with {@link AbstractFlowScanner#visitAll(Collection, FlowNodeVisitor)} * @author Sam Van Oort */ public interface FlowNodeVisitor { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index d8dbe8f4..68c15cca 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -45,16 +45,16 @@ /** * Scanner that will scan down all forks when we hit parallel blocks before continuing, but generally runs in linear order - *

        Think of it as the opposite of {@link DepthFirstScanner}. + *

        Think of it as the opposite of {@link DepthFirstScanner}. * - *

        This is a fairly efficient way to visit all FlowNodes, and provides three useful guarantees: + *

        This is a fairly efficient way to visit all FlowNodes, and provides three useful guarantees: *
          *
        • Every FlowNode is visited, and visited EXACTLY ONCE (not true for LinearScanner)
        • *
        • All parallel branches are visited before we move past the parallel block (not true for DepthFirstScanner)
        • *
        • For EVERY block, the BlockEndNode is visited before the BlockStartNode (not true for DepthFirstScanner, with parallels)
        • *
        * - *

        The big advantages of this approach: + *

        The big advantages of this approach: *
          *
        • Blocks are visited in the order they end (no backtracking) - helps with working a block at a time
        • *
        • Points are visited in linear order within a block (easy to use for analysis)
        • @@ -263,7 +263,7 @@ ArrayDeque convertForksToBlockStarts(ArrayDeque parall * This works by walking back to construct the tree of parallel blocks covering all heads back to the Least Common Ancestor of all heads * (the top parallel block). One by one, as branches join, we remove them from the list of live pieces and replace with their common ancestor. * - *

          The core algorithm is simple in theory but the many cases render the implementation quite complex. In gist: + *

          The core algorithm is simple in theory but the many cases render the implementation quite complex. In gist: *
            *
          • We track FlowPieces, which are Forks (where branches merge) and FlowSegments (where there's an unforked sequence of nodes)
          • *
          • A map of FlowNode to its containing FlowPiece is created
          • @@ -283,7 +283,7 @@ ArrayDeque convertForksToBlockStarts(ArrayDeque parall *
          • Each time we merge a branch in, we need to remove an entry from enclosing blocks & live pieces
          • *
          * - *

          There are some assumptions you need to know about to understand why this works: + *

          There are some assumptions you need to know about to understand why this works: *
            *
          • None of the pieces have multiple parents, since we only look at enclosing blocks (only BlockEndNodes for a parallel block have multiple parents)
          • *
          • No cycles exist in the graph
          • diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java index 12b695ff..e8e18a48 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java @@ -37,7 +37,7 @@ * Extension of {@link LinearScanner} that skips nested blocks at the current level, useful for finding enclosing blocks. * ONLY use this with nodes inside the flow graph, never the last node of a completed flow (it will jump over the whole flow). * - *

            This is useful where you only care about {@link FlowNode}s that precede this one or are part of an enclosing scope (within a Block). + *

            This is useful where you only care about {@link FlowNode}s that precede this one or are part of an enclosing scope (within a Block). * *

            Specifically: *
              @@ -45,7 +45,7 @@ *
            • The only case where you visit branches of a parallel block is if you begin inside it.
            • *
            * - *

            Specific use cases: + *

            Specific use cases: *
              *
            • Finding out the executor workspace used to run a FlowNode
            • *
            • Finding the start of the parallel block enclosing the current node
            • diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java index 49da9ae1..bf6dba88 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java @@ -34,12 +34,12 @@ /** * Scans through the flow graph in strictly linear fashion, visiting only the first branch in parallel blocks. * - *

              Iteration order: depth-ONLY, meaning we walk through parents and only follow the first parent of each {@link FlowNode} + *

              Iteration order: depth-ONLY, meaning we walk through parents and only follow the first parent of each {@link FlowNode} * This means that where there are parallel branches, we will only visit a partial set of {@link FlowNode}s in the directed acyclic graph. * - *

              Use case: we don't care about parallel branches or know they don't exist, we just want to walk through the top-level blocks. + *

              Use case: we don't care about parallel branches or know they don't exist, we just want to walk through the top-level blocks. * - *

              This is the fastest & simplest way to walk a flow, because you only care about a single node at a time. + *

              This is the fastest & simplest way to walk a flow, because you only care about a single node at a time. * @author Sam Van Oort */ public class LinearScanner extends AbstractFlowScanner { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java index 7c5dd53e..3a3c4710 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java @@ -30,10 +30,10 @@ /** * This visitor's callbacks are invoked as we walk through a pipeline flow graph, and it splits it into chunks. - *

              A {@link ForkScanner#visitSimpleChunks(SimpleChunkVisitor, ChunkFinder)} creates these FlowChunks using a {@link ChunkFinder} to define the chunk boundaries. + *

              A {@link ForkScanner#visitSimpleChunks(SimpleChunkVisitor, ChunkFinder)} creates these FlowChunks using a {@link ChunkFinder} to define the chunk boundaries. * - *

              Implementations get to decide how to use & handle chunks. - *

              At a minimum they should handle:

              + *

              Implementations get to decide how to use & handle chunks. + *

              At a minimum they should handle:

              *
                *
              • Unbalanced numbers of chunk start/end calls (for incomplete flows)
              • *
              • A chunk end with no beginning (runs to start of flow, or never began)
              • @@ -43,9 +43,9 @@ *
              * * Important implementation note: multiple callbacks can be invoked for a single node depending on its type.For example, we may capture parallels as chunks. + *

              For example, we may capture parallels as chunks. * - *

              Callbacks Reporting on chunk/parallel information:

              + *

              Callbacks Reporting on chunk/parallel information:

              *
                *
              • {@link #chunkStart(FlowNode, FlowNode, ForkScanner)} is called on the current node when we hit start of a boundary (inclusive)
              • *
              • {@link #chunkEnd(FlowNode, FlowNode, ForkScanner)} is called when we hit end of a boundary (inclusive)
              • @@ -94,7 +94,7 @@ public interface SimpleChunkVisitor { /** * Hit the end start of a parallel branch - *

                May not be invoked if we're inside an in-progress parallel + *

                May not be invoked if we're inside an in-progress parallel * @param parallelStartNode First node of parallel (BlockStartNode before the branches) * @param branchEndNode Final node of the branch (may be BlockEndNode if done, otherwise just the last one executed) * @param scanner diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/package-info.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/package-info.java index 45ef7891..eaf625d0 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/package-info.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/package-info.java @@ -1,11 +1,11 @@ /** * Provides a library of methods to work with and analyze the graph of {@link org.jenkinsci.plugins.workflow.graph.FlowNode}s produced from a pipeline execution. * - *

                The core APIs are described in the javadocs for {@link org.jenkinsci.plugins.workflow.graphanalysis.AbstractFlowScanner} + *

                The core APIs are described in the javadocs for {@link org.jenkinsci.plugins.workflow.graphanalysis.AbstractFlowScanner} * But in general it provides for iteration through the Directed Acyclic Graph (DAG) of a flow, filtering, search for matches, and * visiting all nodes via internal iteration. * - *

                Static methods and a few implementations are also provided in {@link org.jenkinsci.plugins.workflow.graphanalysis.FlowScanningUtils}. + *

                Static methods and a few implementations are also provided in {@link org.jenkinsci.plugins.workflow.graphanalysis.FlowScanningUtils}. */ package org.jenkinsci.plugins.workflow.graphanalysis; \ No newline at end of file From d21d67a1f7bc77e8e65abd9038d9be0db4c66b1d Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 17 Aug 2016 16:02:46 -0400 Subject: [PATCH 084/104] Placate more javadocs complaints --- .../plugins/workflow/graphanalysis/AbstractFlowScanner.java | 2 +- .../jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java index e5a1e11b..8afb4002 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java @@ -185,7 +185,7 @@ public boolean setup(@CheckForNull FlowNode head) { protected abstract void setHeads(@Nonnull Collection filteredHeads); /** - * Actual meat of the iteration, get the next node to visit, using & updating state as needed + * Actual meat of the iteration, get the next node to visit, using and updating state as needed * @param current Current node to use in generating next value * @param blackList Nodes that are not eligible for visiting * @return Next node to visit, or null if we've exhausted the node list diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java index bf6dba88..93f74003 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java @@ -39,7 +39,7 @@ * *

                Use case: we don't care about parallel branches or know they don't exist, we just want to walk through the top-level blocks. * - *

                This is the fastest & simplest way to walk a flow, because you only care about a single node at a time. + *

                This is the fastest and simplest way to walk a flow, because you only care about a single node at a time. * @author Sam Van Oort */ public class LinearScanner extends AbstractFlowScanner { From 1678b7dab3ff58dadf8bdeff0079e9cc12acd110 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 17 Aug 2016 16:11:16 -0400 Subject: [PATCH 085/104] Fix more javadocs complaints and silence doclint nonsense --- pom.xml | 1 + .../plugins/workflow/graphanalysis/SimpleChunkVisitor.java | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 6e6a16d9..ef030905 100644 --- a/pom.xml +++ b/pom.xml @@ -63,6 +63,7 @@ 1.642.3 + -Xdoclint:none diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java index 3a3c4710..a2c658b8 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java @@ -42,7 +42,7 @@ *
              • Atom nodes not within the current Chunk (visitor is responsible for handling state)
              • *
              * - * Important implementation note: multiple callbacks can be invoked for a single node depending on its type.Important implementation note: multiple callbacks can be invoked for a single node depending on its type. *

              For example, we may capture parallels as chunks. * *

              Callbacks Reporting on chunk/parallel information:

              From 2030360cc8bb6c1a701e92ffec8fa70e613c62c5 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Fri, 19 Aug 2016 10:37:39 -0400 Subject: [PATCH 086/104] More exhaustive tests for SimpleVisitor and fix a pair of swapped args on AtomNode calls --- .../workflow/graphanalysis/ForkScanner.java | 2 +- .../graphanalysis/ForkScannerTest.java | 32 +++++++++++++++++++ .../workflow/graphanalysis/TestVisitor.java | 8 ++++- 3 files changed, 40 insertions(+), 2 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index 68c15cca..4597288d 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -558,7 +558,7 @@ public void visitSimpleChunks(@Nonnull SimpleChunkVisitor visitor, @Nonnull Chun boundary = true; } if (!boundary) { - visitor.atomNode(prev, f, myNext, this); + visitor.atomNode(myNext, f, prev, this); } // Trigger on parallels diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java index 247d8e3b..56ba5643 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java @@ -66,6 +66,16 @@ public class ForkScannerTest { @Rule public JenkinsRule r = new JenkinsRule(); + public static Predicate predicateForCallEntryType(final TestVisitor.CallType type) { + return new Predicate() { + TestVisitor.CallType myType = type; + @Override + public boolean apply(TestVisitor.CallEntry input) { + return input.type != null && input.type == myType; + } + }; + } + /** Flow structure (ID - type) 2 - FlowStartNode (BlockStartNode) 3 - Echostep @@ -368,6 +378,28 @@ public void testSimpleVisitor() throws 
Exception { TestVisitor.CallEntry first = new TestVisitor.CallEntry(TestVisitor.CallType.CHUNK_START, 2, -1, -1, -1); first.assertEquals(visitor.calls.get(18)); + int chunkStartCount = Iterables.size(Iterables.filter(visitor.calls, predicateForCallEntryType(TestVisitor.CallType.CHUNK_START))); + int chunkEndCount = Iterables.size(Iterables.filter(visitor.calls, predicateForCallEntryType(TestVisitor.CallType.CHUNK_END))); + Assert.assertEquals(4, chunkStartCount); + Assert.assertEquals(4, chunkEndCount); + + // Verify the AtomNode calls are correct + List < TestVisitor.CallEntry > atomNodeCalls = Lists.newArrayList(Iterables.filter(visitor.calls, predicateForCallEntryType(TestVisitor.CallType.ATOM_NODE))); + Assert.assertEquals(5, atomNodeCalls.size()); + for (TestVisitor.CallEntry ce : atomNodeCalls) { + int beforeId = ce.ids[0]; + int atomNodeId = ce.ids[1]; + int afterId = ce.ids[2]; + int alwaysEmpty = ce.ids[3]; + Assert.assertTrue(ce+" beforeNodeId <= 0: "+beforeId, beforeId > 0); + Assert.assertTrue(ce + " atomNodeId <= 0: " + atomNodeId, atomNodeId > 0); + Assert.assertTrue(ce+" afterNodeId <= 0: "+afterId, afterId > 0); + Assert.assertEquals(-1, alwaysEmpty); + Assert.assertTrue(ce + "AtomNodeId >= afterNodeId", atomNodeId < afterId); + Assert.assertTrue(ce+ "beforeNodeId >= atomNodeId", beforeId < atomNodeId); + } + + List parallelCalls = Lists.newArrayList(Iterables.filter(visitor.calls, new Predicate() { @Override public boolean apply(TestVisitor.CallEntry input) { diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestVisitor.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestVisitor.java index 634b5a63..cb0f59ea 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestVisitor.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/TestVisitor.java @@ -63,9 +63,15 @@ public void assertEquals(CallEntry test) { Assert.assertNotNull(test.type); Assert.assertArrayEquals(this.ids, 
test.ids); } + + @Override + public String toString() { + return "CallEntry: "+type+" Ids: "+Arrays.toString(ids); + } + } - public List calls = new ArrayList(); + public ArrayList calls = new ArrayList(); @Override public void chunkStart(@Nonnull FlowNode startNode, @CheckForNull FlowNode beforeBlock, @Nonnull ForkScanner scanner) { From 6b6eba7119d5a408ede14a0188be4f5e20e2724f Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Fri, 19 Aug 2016 18:31:58 -0400 Subject: [PATCH 087/104] Address review comments from @oleg-nenashev --- .../workflow/graphanalysis/AbstractFlowScanner.java | 1 + .../plugins/workflow/graphanalysis/DepthFirstScanner.java | 2 ++ .../plugins/workflow/graphanalysis/ForkScanner.java | 8 ++++++-- .../workflow/graphanalysis/LinearBlockHoppingScanner.java | 2 ++ .../plugins/workflow/graphanalysis/LinearScanner.java | 2 ++ .../plugins/workflow/graphanalysis/MemoryFlowChunk.java | 8 ++++---- 6 files changed, 17 insertions(+), 6 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java index 8afb4002..f0b82acb 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java @@ -86,6 +86,7 @@ *
            *
          * + * Implementations are generally NOT threadsafe and should be so annotated * @author Sam Van Oort */ public abstract class AbstractFlowScanner implements Iterable , Filterator { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java index 8b8eebec..9df62c00 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java @@ -28,6 +28,7 @@ import org.jenkinsci.plugins.workflow.graph.FlowNode; import javax.annotation.Nonnull; +import javax.annotation.concurrent.NotThreadSafe; import java.util.ArrayDeque; import java.util.Collection; import java.util.HashSet; @@ -42,6 +43,7 @@ *

          The behavior is analogous to {@link org.jenkinsci.plugins.workflow.graph.FlowGraphWalker} but faster. * @author Sam Van Oort */ +@NotThreadSafe public class DepthFirstScanner extends AbstractFlowScanner { protected ArrayDeque queue; diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index 4597288d..5ccf607f 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -34,6 +34,7 @@ import javax.annotation.CheckForNull; import javax.annotation.Nonnull; +import javax.annotation.concurrent.NotThreadSafe; import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Collection; @@ -64,12 +65,15 @@ * * @author Sam Van Oort */ +@NotThreadSafe public class ForkScanner extends AbstractFlowScanner { + @CheckForNull public NodeType getCurrentType() { return currentType; } + @CheckForNull public NodeType getNextType() { return nextType; } @@ -93,8 +97,8 @@ public enum NodeType { private boolean walkingFromFinish = false; - protected NodeType currentType; - protected NodeType nextType; + protected NodeType currentType = null; + protected NodeType nextType = null; public ForkScanner() { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java index e8e18a48..11bc7363 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java @@ -30,6 +30,7 @@ import javax.annotation.CheckForNull; import javax.annotation.Nonnull; +import javax.annotation.concurrent.NotThreadSafe; import java.util.Collection; import java.util.List; @@ -54,6 +55,7 @@ * * @author Sam Van Oort */ 
+@NotThreadSafe public class LinearBlockHoppingScanner extends LinearScanner { @Override diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java index 93f74003..e8b34284 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java @@ -27,6 +27,7 @@ import org.jenkinsci.plugins.workflow.graph.FlowNode; import javax.annotation.Nonnull; +import javax.annotation.concurrent.NotThreadSafe; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -42,6 +43,7 @@ *

          This is the fastest and simplest way to walk a flow, because you only care about a single node at a time. * @author Sam Van Oort */ +@NotThreadSafe public class LinearScanner extends AbstractFlowScanner { @Override diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java index 16d1410e..43bba815 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/MemoryFlowChunk.java @@ -35,10 +35,10 @@ * @author Sam Van Oort */ public class MemoryFlowChunk implements FlowChunkWithContext { - protected FlowNode firstNode; - protected FlowNode lastNode; - protected FlowNode nodeBefore; - protected FlowNode nodeAfter; + protected FlowNode firstNode = null; + protected FlowNode lastNode = null; + protected FlowNode nodeBefore = null; + protected FlowNode nodeAfter = null; private long pauseTimeMillis = 0; public MemoryFlowChunk(@CheckForNull FlowNode before, @Nonnull FlowNode firstNode, @Nonnull FlowNode lastNode, @CheckForNull FlowNode nodeAfter) { From 33e079e1e9b161e58cfdab4d224c1ee036117e89 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Sun, 21 Aug 2016 15:36:58 -0400 Subject: [PATCH 088/104] Fix an off-by-one case in ForkScanner with parallels and add more extensive test coverage --- .../workflow/graphanalysis/ForkScanner.java | 9 +-- .../graphanalysis/ForkScannerTest.java | 57 +++++++++++++++++-- 2 files changed, 55 insertions(+), 11 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index 5ccf607f..bcbc3046 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -149,13 +149,12 @@ public boolean 
isWalkingFromFinish() { /** Tracks state for parallel blocks, so we can ensure all are visited and know the branch starting point */ protected static class ParallelBlockStart { protected BlockStartNode forkStart; // This is the node with child branches - protected int remainingBranches; protected int totalBranches; protected ArrayDeque unvisited = new ArrayDeque(); // Remaining branches of this that we have have not visited yet protected ParallelBlockStart(BlockStartNode forkStart, int branchCount) { this.forkStart = forkStart; - this.remainingBranches = branchCount; + this.totalBranches = branchCount; } /** Strictly for internal use in the least common ancestor problem */ @@ -248,7 +247,6 @@ ArrayDeque convertForksToBlockStarts(ArrayDeque parall ParallelBlockStart start = new ParallelBlockStart(); start.totalBranches = f.following.size(); start.forkStart = f.forkStart; - start.remainingBranches = start.totalBranches; start.unvisited = new ArrayDeque(); // Add the nodes to the parallel starts here @@ -378,7 +376,6 @@ protected void setHeads(@Nonnull Collection heads) { myCurrent = currentParallelStart.unvisited.pop(); myNext = myCurrent; nextType = NodeType.PARALLEL_BRANCH_END; - currentParallelStart.remainingBranches--; walkingFromFinish = false; } else { FlowNode f = heads.iterator().next(); @@ -432,7 +429,6 @@ protected FlowNode hitParallelEnd(BlockEndNode endNode, List parents, ParallelBlockStart parallelBlockStart = new ParallelBlockStart(start, branches.size()); output = branches.pop(); parallelBlockStart.totalBranches = parents.size(); - parallelBlockStart.remainingBranches--; parallelBlockStart.unvisited = branches; if (currentParallelStart != null) { @@ -452,7 +448,7 @@ protected FlowNode hitParallelStart() { FlowNode output = null; if (currentParallelStart != null) { - if ((currentParallelStart.remainingBranches--) <= 0) { // Strip off a completed branch + if (currentParallelStart.unvisited.isEmpty()) { // Strip off a completed branch // We finished a 
nested set of parallel branches, visit the head and move up a level output = currentParallelStartNode; @@ -522,7 +518,6 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection if (currentParallelStart != null && currentParallelStart.unvisited.size() > 0) { output = currentParallelStart.unvisited.pop(); nextType = NodeType.PARALLEL_BRANCH_END; - currentParallelStart.remainingBranches--; } if (output == null) { nextType = null; diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java index 56ba5643..cf974bba 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java @@ -33,6 +33,7 @@ import org.jenkinsci.plugins.workflow.cps.steps.ParallelStep; import org.jenkinsci.plugins.workflow.flow.FlowExecution; import org.jenkinsci.plugins.workflow.graph.BlockStartNode; +import org.jenkinsci.plugins.workflow.graph.FlowGraphWalker; import org.jenkinsci.plugins.workflow.graph.FlowNode; import org.jenkinsci.plugins.workflow.job.WorkflowJob; import org.jenkinsci.plugins.workflow.job.WorkflowRun; @@ -48,6 +49,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashSet; import java.util.List; @@ -201,7 +203,6 @@ public void testForkedScanner() throws Exception { ForkScanner.ParallelBlockStart start = scanner.currentParallelStart; Assert.assertEquals(2, start.totalBranches); - Assert.assertEquals(1, start.remainingBranches); Assert.assertEquals(1, start.unvisited.size()); Assert.assertEquals(exec.getNode("4"), start.forkStart); @@ -330,7 +331,6 @@ public void testLeastCommonAncestor() throws Exception { ForkScanner.ParallelBlockStart start = starts.peek(); Assert.assertEquals(2, start.totalBranches); 
Assert.assertEquals(2, start.unvisited.size()); - Assert.assertEquals(2, start.remainingBranches); Assert.assertEquals(exec.getNode("4"), start.forkStart); Assert.assertArrayEquals(heads.toArray(), start.unvisited.toArray()); @@ -344,12 +344,10 @@ public void testLeastCommonAncestor() throws Exception { ForkScanner.ParallelBlockStart inner = starts.getFirst(); ForkScanner.ParallelBlockStart outer = starts.getLast(); - Assert.assertEquals(2, inner.remainingBranches); Assert.assertEquals(2, inner.totalBranches); Assert.assertEquals(2, inner.unvisited.size()); Assert.assertEquals(exec.getNode("12"), inner.forkStart); - Assert.assertEquals(2, outer.remainingBranches); Assert.assertEquals(2, outer.totalBranches); Assert.assertEquals(1, outer.unvisited.size()); Assert.assertEquals(exec.getNode("9"), outer.unvisited.peek()); @@ -424,4 +422,55 @@ public boolean apply(TestVisitor.CallEntry input) { new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_START, 4, 7).assertEquals(parallelCalls.get(5)); } + + /** Checks for off-by one cases with multiple parallel */ + @Test + public void testTripleParallel() throws Exception { + WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "TripleParallel"); + job.setDefinition(new CpsFlowDefinition( + "stage 'test'\n"+ // Id 3, Id 2 before that has the FlowStartNode + "parallel 'unit':{\n" + // Id 4 starts parallel, Id 7 is the block start for the unit branch + " echo \"Unit testing...\"\n" + // Id 10 + "},'integration':{\n" + // Id 11 is unit branch end, Id 8 is the branch start for integration branch + " echo \"Integration testing...\"\n" + // Id 12 + "}, 'ui':{\n" + // Id 13 in integration branch end, Id 9 is branch start for UI branch + " echo \"UI testing...\"\n" + // Id 14 + "}" // Node 15 is UI branch end node, Node 16 is Parallel End node, Node 17 is FlowWendNode + )); + WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); + + ForkScanner.setParallelStartPredicate(PARALLEL_START_PREDICATE); + 
FlowExecution exec = b.getExecution(); + ForkScanner f = new ForkScanner(); + f.setup(exec.getCurrentHeads()); + TestVisitor visitor = new TestVisitor(); + f.visitSimpleChunks(visitor, new BlockChunkFinder()); + + ArrayList parallels = Lists.newArrayList(Iterables.filter(visitor.calls, + Predicates.or( + predicateForCallEntryType(TestVisitor.CallType.PARALLEL_BRANCH_START), + predicateForCallEntryType(TestVisitor.CallType.PARALLEL_BRANCH_END)) + ) + ); + Assert.assertEquals(6, parallels.size()); + + // Visiting from partially completed branches + // Verify we still get appropriate parallels callbacks for a branch end + // even if in-progress and no explicit end node + ArrayList ends = new ArrayList(); + ends.add(exec.getNode("11")); + ends.add(exec.getNode("12")); + ends.add(exec.getNode("14")); + visitor = new TestVisitor(); + f.setup(ends); + f.visitSimpleChunks(visitor, new BlockChunkFinder()); + parallels = Lists.newArrayList(Iterables.filter(visitor.calls, + Predicates.or( + predicateForCallEntryType(TestVisitor.CallType.PARALLEL_BRANCH_START), + predicateForCallEntryType(TestVisitor.CallType.PARALLEL_BRANCH_END)) + ) + ); + Assert.assertEquals(6, parallels.size()); + Assert.assertEquals(17, visitor.calls.size()); + } } From 56e42eb1e9a73951cbc0656dde6b827c1228c974 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Sun, 21 Aug 2016 15:38:52 -0400 Subject: [PATCH 089/104] Restrive access to some of ForkScanner internals --- .../workflow/graphanalysis/ForkScanner.java | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index bcbc3046..c8e6840a 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -97,8 +97,8 @@ public enum NodeType { private boolean 
walkingFromFinish = false; - protected NodeType currentType = null; - protected NodeType nextType = null; + NodeType currentType = null; + NodeType nextType = null; public ForkScanner() { @@ -147,12 +147,12 @@ public boolean isWalkingFromFinish() { } /** Tracks state for parallel blocks, so we can ensure all are visited and know the branch starting point */ - protected static class ParallelBlockStart { - protected BlockStartNode forkStart; // This is the node with child branches - protected int totalBranches; - protected ArrayDeque unvisited = new ArrayDeque(); // Remaining branches of this that we have have not visited yet + static class ParallelBlockStart { + BlockStartNode forkStart; // This is the node with child branches + int totalBranches; + ArrayDeque unvisited = new ArrayDeque(); // Remaining branches of this that we have have not visited yet - protected ParallelBlockStart(BlockStartNode forkStart, int branchCount) { + ParallelBlockStart(BlockStartNode forkStart, int branchCount) { this.forkStart = forkStart; this.totalBranches = branchCount; } @@ -414,7 +414,7 @@ public int getParallelDepth() { * @param parents Parent nodes that end here * @return FlowNode myNext node to visit */ - protected FlowNode hitParallelEnd(BlockEndNode endNode, List parents, Collection blackList) { + FlowNode hitParallelEnd(BlockEndNode endNode, List parents, Collection blackList) { BlockStartNode start = endNode.getStartNode(); ArrayDeque branches = new ArrayDeque(); @@ -444,7 +444,7 @@ protected FlowNode hitParallelEnd(BlockEndNode endNode, List parents, * Invoked when we complete parallel block, walking from the head (so encountered after the end) * @return FlowNode if we're the last node */ - protected FlowNode hitParallelStart() { + FlowNode hitParallelStart() { FlowNode output = null; if (currentParallelStart != null) { From 94e91692324293dca74129244b30810974600ce2 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Sun, 21 Aug 2016 15:47:26 -0400 Subject: [PATCH 090/104] 
Remove unused totalBranches field in ForkScanner --- .../plugins/workflow/graphanalysis/ForkScanner.java | 8 ++------ .../plugins/workflow/graphanalysis/ForkScannerTest.java | 4 ---- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index c8e6840a..5a1b6cb7 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -149,12 +149,10 @@ public boolean isWalkingFromFinish() { /** Tracks state for parallel blocks, so we can ensure all are visited and know the branch starting point */ static class ParallelBlockStart { BlockStartNode forkStart; // This is the node with child branches - int totalBranches; ArrayDeque unvisited = new ArrayDeque(); // Remaining branches of this that we have have not visited yet - ParallelBlockStart(BlockStartNode forkStart, int branchCount) { + ParallelBlockStart(BlockStartNode forkStart) { this.forkStart = forkStart; - this.totalBranches = branchCount; } /** Strictly for internal use in the least common ancestor problem */ @@ -245,7 +243,6 @@ ArrayDeque convertForksToBlockStarts(ArrayDeque parall for (Fork f : parallelForks) { // Do processing to assign heads to flowsegments ParallelBlockStart start = new ParallelBlockStart(); - start.totalBranches = f.following.size(); start.forkStart = f.forkStart; start.unvisited = new ArrayDeque(); @@ -426,9 +423,8 @@ FlowNode hitParallelEnd(BlockEndNode endNode, List parents, Collection FlowNode output = null; if (branches.size() > 0) { // Push another branch start - ParallelBlockStart parallelBlockStart = new ParallelBlockStart(start, branches.size()); + ParallelBlockStart parallelBlockStart = new ParallelBlockStart(start); output = branches.pop(); - parallelBlockStart.totalBranches = parents.size(); 
parallelBlockStart.unvisited = branches; if (currentParallelStart != null) { diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java index cf974bba..d1af6010 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java @@ -202,7 +202,6 @@ public void testForkedScanner() throws Exception { Assert.assertEquals(exec.getNode("4"), scanner.currentParallelStartNode); ForkScanner.ParallelBlockStart start = scanner.currentParallelStart; - Assert.assertEquals(2, start.totalBranches); Assert.assertEquals(1, start.unvisited.size()); Assert.assertEquals(exec.getNode("4"), start.forkStart); @@ -329,7 +328,6 @@ public void testLeastCommonAncestor() throws Exception { Assert.assertEquals(1, starts.size()); ForkScanner.ParallelBlockStart start = starts.peek(); - Assert.assertEquals(2, start.totalBranches); Assert.assertEquals(2, start.unvisited.size()); Assert.assertEquals(exec.getNode("4"), start.forkStart); Assert.assertArrayEquals(heads.toArray(), start.unvisited.toArray()); @@ -344,11 +342,9 @@ public void testLeastCommonAncestor() throws Exception { ForkScanner.ParallelBlockStart inner = starts.getFirst(); ForkScanner.ParallelBlockStart outer = starts.getLast(); - Assert.assertEquals(2, inner.totalBranches); Assert.assertEquals(2, inner.unvisited.size()); Assert.assertEquals(exec.getNode("12"), inner.forkStart); - Assert.assertEquals(2, outer.totalBranches); Assert.assertEquals(1, outer.unvisited.size()); Assert.assertEquals(exec.getNode("9"), outer.unvisited.peek()); Assert.assertEquals(exec.getNode("4"), outer.forkStart); From 6cd4215feba3923cc902f37bec4e5026344eb433 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 23 Aug 2016 11:12:10 -0400 Subject: [PATCH 091/104] Address review comments from @oleg-nenashev --- 
.../plugins/workflow/graphanalysis/FlowChunkWithContext.java | 2 +- .../jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkWithContext.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkWithContext.java index 17d8383b..1285808b 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkWithContext.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunkWithContext.java @@ -7,7 +7,7 @@ /** FlowChunk with information about what comes before/after */ public interface FlowChunkWithContext extends FlowChunk { - /** Return the node before this chunk, or null if it is the end */ + /** Return the node before this chunk, or null if it is the beginning */ @CheckForNull FlowNode getNodeBefore(); diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index 5a1b6cb7..787fe22b 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -575,7 +575,7 @@ public void visitSimpleChunks(@Nonnull SimpleChunkVisitor visitor, @Nonnull Chun visitor.parallelBranchStart(parallelStart, myCurrent, this); break; default: - break; + throw new IllegalStateException("Unhandled type for current node"); } } } From 9d05aa693934dbc738379a6d4eac9704831e5ce1 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 23 Aug 2016 13:56:17 -0400 Subject: [PATCH 092/104] Apply a few small review changes --- .../plugins/workflow/graphanalysis/AbstractFlowScanner.java | 4 ++-- .../plugins/workflow/graphanalysis/FilteratorImpl.java | 6 +++--- .../plugins/workflow/graphanalysis/FlowScanningUtils.java | 2 +- .../plugins/workflow/graphanalysis/ForkScanner.java | 6 +++++- 4 files 
changed, 11 insertions(+), 7 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java index c86604a8..5022e985 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java @@ -106,7 +106,7 @@ protected Collection convertToFastCheckable(@CheckForNull Collection MAX_LIST_CHECK_SIZE ? new HashSet(nodeCollection) : nodeCollection; @@ -272,7 +272,7 @@ public FlowNode findFirstMatch(@CheckForNull FlowNode head, @Nonnull Predicate matchPredicate) { - if (exec != null && exec.getCurrentHeads() != null) { + if (exec != null && exec.getCurrentHeads() != null && !exec.getCurrentHeads().isEmpty()) { return this.findFirstMatch(exec.getCurrentHeads(), null, matchPredicate); } return null; diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java index 6ba13c6c..1dd769b8 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java @@ -34,9 +34,9 @@ */ class FilteratorImpl implements Filterator { private boolean hasNext = false; - private T nextVal; - private Iterator wrapped; - private Predicate matchCondition; + private T nextVal = null; + private Iterator wrapped = null; + private Predicate matchCondition = null; public FilteratorImpl filter(Predicate matchCondition) { return new FilteratorImpl(this, matchCondition); diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java index e2ae479d..4c42ffbd 100644 --- 
a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java @@ -82,7 +82,7 @@ public boolean apply(FlowNode input) { * @return Iterator that returns all enclosing BlockStartNodes from the inside out. */ @Nonnull - public static Filterator filterableEnclosingBlocks(@Nonnull FlowNode f) { + public static Filterator fetchEnclosingBlocks(@Nonnull FlowNode f) { LinearBlockHoppingScanner scanner = new LinearBlockHoppingScanner(); scanner.setup(f); return scanner.filter(MATCH_BLOCK_START); diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index 05ae4efc..c52abc22 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -109,6 +109,7 @@ interface FlowPiece { // Mostly a marker } /** Linear (no parallels) run of FLowNodes */ + // TODO see if this can be replaced with a FlowChunk acting as a container class for a list of FlowNodes static class FlowSegment implements FlowPiece { ArrayList visited = new ArrayList(); FlowPiece after; @@ -125,6 +126,7 @@ public boolean isLeaf() { * @param nodeMapping Mapping of BlockStartNodes to flowpieces (forks or segments) * @param joinPoint Node where the branches intersect/meet (fork point) * @param joiningBranch Flow piece that is joining this + * @throws IllegalStateException When you try to split a segment on a node that it doesn't contain, or invalid graph structure * @return Recreated fork */ Fork split(@Nonnull HashMap nodeMapping, @Nonnull BlockStartNode joinPoint, @Nonnull FlowPiece joiningBranch) { @@ -167,6 +169,8 @@ public void add(FlowNode f) { } /** Internal class used for constructing the LeastCommonAncestor structure */ + // TODO see if this can be replaced with a FlowChunk acting as a 
container class for parallels + // I.E. ParallelMemoryFlowChunk or similar static class Fork extends ParallelBlockStart implements FlowPiece { List following = new ArrayList(); @@ -246,7 +250,7 @@ ArrayDeque leastCommonAncestor(@Nonnull Set heads) ArrayDeque parallelForks = new ArrayDeque(); // Tracks the discovered forks in order of encounter for (FlowNode f : heads) { - iterators.add(FlowScanningUtils.filterableEnclosingBlocks(f)); + iterators.add(FlowScanningUtils.fetchEnclosingBlocks(f)); FlowSegment b = new FlowSegment(); b.add(f); livePieces.add(b); From d332d8c6b665fd8b23a906c445a7b3a23b9aab9f Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 23 Aug 2016 14:32:05 -0400 Subject: [PATCH 093/104] Re-enable linting just to see how much pain there is --- pom.xml | 1 - 1 file changed, 1 deletion(-) diff --git a/pom.xml b/pom.xml index ef030905..6e6a16d9 100644 --- a/pom.xml +++ b/pom.xml @@ -63,7 +63,6 @@ 1.642.3 - -Xdoclint:none From 9067f00e722ca2fc290869fa12f395ea4f95c5d1 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 24 Aug 2016 03:06:30 -0400 Subject: [PATCH 094/104] Javadocs update --- .../workflow/actions/FlowNodeAction.java | 2 +- .../graphanalysis/AbstractFlowScanner.java | 10 +++--- .../workflow/graphanalysis/ChunkFinder.java | 7 +++-- .../graphanalysis/DepthFirstScanner.java | 4 +-- .../workflow/graphanalysis/Filterator.java | 2 +- .../workflow/graphanalysis/FlowChunk.java | 4 +-- .../graphanalysis/FlowNodeVisitor.java | 2 +- .../workflow/graphanalysis/ForkScanner.java | 15 ++++++--- .../graphanalysis/LabelledChunkFinder.java | 10 ++++-- .../LinearBlockHoppingScanner.java | 6 ++-- .../workflow/graphanalysis/LinearScanner.java | 6 ++-- .../graphanalysis/SimpleChunkVisitor.java | 31 ++++++++++++------- .../graphanalysis/StandardChunkVisitor.java | 13 ++++---- .../workflow/graphanalysis/package-info.java | 4 +-- 14 files changed, 67 insertions(+), 49 deletions(-) diff --git 
a/src/main/java/org/jenkinsci/plugins/workflow/actions/FlowNodeAction.java b/src/main/java/org/jenkinsci/plugins/workflow/actions/FlowNodeAction.java index ebfa69a3..0b0a55f7 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/actions/FlowNodeAction.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/actions/FlowNodeAction.java @@ -42,7 +42,7 @@ public interface FlowNodeAction extends Action { * Called by {@link FlowExecution#loadActions(FlowNode)} when * actions get loaded from persistent storage. * - *

          + *

          * This is more of an internal API between {@link FlowNode} and * {@link FlowExecution}. Not allowed to be called from outside. */ diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java index f0b82acb..8afd9e08 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java @@ -43,9 +43,9 @@ /** * Core APIs and base logic for FlowScanners that extract information from a pipeline execution. * - *

          These iterate through the directed acyclic graph (DAG) or "flow graph" of {@link FlowNode}s produced when a pipeline runs. + *

          These iterate through the directed acyclic graph (DAG) or "flow graph" of {@link FlowNode}s produced when a pipeline runs. * - *

          This provides 6 base APIs to use, in decreasing expressiveness and increasing genericity: + *

          This provides 6 base APIs to use, in decreasing expressiveness and increasing genericity: *

            * - {@link #findFirstMatch(Collection, Collection, Predicate)}: find the first FlowNode matching predicate condition. * - {@link #filteredNodes(Collection, Collection, Predicate)}: return the collection of FlowNodes matching the predicate. @@ -56,11 +56,11 @@ * - Iterable: for syntactic sugar, FlowScanners implement Iterable to allow use in for-each loops once initialized. *
          * - *

          All APIs visit the parent nodes, walking backward from heads(inclusive) until they they hit {@link #myBlackList} nodes (exclusive) or reach the end of the DAG. + *

          All APIs visit the parent nodes, walking backward from heads(inclusive) until they they hit {@link #myBlackList} nodes (exclusive) or reach the end of the DAG. * If blackList nodes are an empty collection or null, APIs will walk to the beginning of the FlowGraph. * Multiple blackList nodes are helpful for putting separate bounds on walking different parallel branches. * - *

          Key Points: + *

          Key Points: *

        • There are many helper methods offering syntactic sugar for the above APIs in common use cases (simpler method signatures).
        • *
        • Each implementation provides its own iteration order (described in its javadoc comments), * but it is generally unsafe to rely on parallel branches being visited in a specific order.
        • @@ -74,7 +74,7 @@ *
        • Allows for caching to be added inside a FlowScanner if desired, but caching is only useful when reused.
        • *
        * - *

        Suggested uses: + *

        Suggested uses: *

          *
        • Implement a {@link FlowNodeVisitor} that collects metrics from each FlowNode visited, and call visitAll to extract the data.
        • *
        • Find all flownodes of a given type (ex: stages), using {@link #filteredNodes(Collection, Collection, Predicate)}
        • diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ChunkFinder.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ChunkFinder.java index 3c079ae6..f326ecf8 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ChunkFinder.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ChunkFinder.java @@ -6,7 +6,8 @@ import javax.annotation.Nonnull; /** - * Matches the start and end of a chunk + * Used to define the start and end of a {@link FlowChunk} to split a {@link org.jenkinsci.plugins.workflow.flow.FlowExecution} + * (For use with a {@link SimpleChunkVisitor} in the {@link ForkScanner#visitSimpleChunks(SimpleChunkVisitor, ChunkFinder)} * @author Sam Van Oort */ public interface ChunkFinder { @@ -25,8 +26,8 @@ public interface ChunkFinder { /** * Test if the current node is the end of a chunk (inclusive) * @param current Node to test for being end - *

          For a block, the {@link org.jenkinsci.plugins.workflow.graph.BlockEndNode} - *

          For a legacy stage or marker, this will be first node of new stage (previous is the marker) + *

          For a block, the {@link org.jenkinsci.plugins.workflow.graph.BlockEndNode} + *

          For a legacy stage or marker, this will be first node of new stage (previous is the marker) * @param previous Previous node, to use in testing chunk * @return True if current is the end of a chunk (inclusive) */ diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java index 9df62c00..59afbedc 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java @@ -37,10 +37,10 @@ /** Does a simple and somewhat efficient depth-first search of all FlowNodes in the DAG. * - *

          Iteration order: depth-first search, revisiting parallel branches once done. + *

          Iteration order: depth-first search, revisiting parallel branches once done. * With parallel branches, the first branch is explored, then remaining branches are explored in reverse order. * - *

          The behavior is analogous to {@link org.jenkinsci.plugins.workflow.graph.FlowGraphWalker} but faster. + *

          The behavior is analogous to {@link org.jenkinsci.plugins.workflow.graph.FlowGraphWalker} but faster. * @author Sam Van Oort */ @NotThreadSafe diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java index fed4dafc..2eacc197 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/Filterator.java @@ -31,7 +31,7 @@ /** Iterator that may be navigated through a filtered wrapper. * - *

          As a rule, assume that returned Filterators wrap an iterator and pass calls to it. + *

          As a rule, assume that returned Filterators wrap an iterator and pass calls to it. * Thus the iterator position will change if next() is called on the filtered versions. * Note also: you may filter a filterator, if needed. * @author Sam Van Oort diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java index 453de963..6bce6d0c 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowChunk.java @@ -30,9 +30,9 @@ /** * Common container interface for a series of {@link FlowNode}s with a logical start and end. - *

          We use this because every plugin has a different way of storing info about the nodes. + *

          We use this because every plugin has a different way of storing info about the nodes. * - *

          Common uses: + *

          Common uses: *

            *
          • A single FlowNode (when coupling with timing/status APIs)
          • *
          • A block (with a {@link org.jenkinsci.plugins.workflow.graph.BlockStartNode} and {@link org.jenkinsci.plugins.workflow.graph.BlockEndNode})
          • diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java index 3fd5a5e8..790963e0 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowNodeVisitor.java @@ -32,7 +32,7 @@ /** * Interface used when examining a pipeline FlowNode graph node by node, and terminating when a condition is met * - *

            This is intended to couple with {@link AbstractFlowScanner#visitAll(Collection, FlowNodeVisitor)} + *

            This is intended to couple with {@link AbstractFlowScanner#visitAll(Collection, FlowNodeVisitor)} * @author Sam Van Oort */ public interface FlowNodeVisitor { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index 787fe22b..5ab55da0 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -46,16 +46,16 @@ /** * Scanner that will scan down all forks when we hit parallel blocks before continuing, but generally runs in linear order - *

            Think of it as the opposite of {@link DepthFirstScanner}. + *

            Think of it as the opposite of {@link DepthFirstScanner}. * - *

            This is a fairly efficient way to visit all FlowNodes, and provides three useful guarantees: + *

            This is a fairly efficient way to visit all FlowNodes, and provides three useful guarantees: *

              *
            • Every FlowNode is visited, and visited EXACTLY ONCE (not true for LinearScanner)
            • *
            • All parallel branches are visited before we move past the parallel block (not true for DepthFirstScanner)
            • *
            • For EVERY block, the BlockEndNode is visited before the BlockStartNode (not true for DepthFirstScanner, with parallels)
            • *
            * - *

            The big advantages of this approach: + *

            The big advantages of this approach: *

              *
            • Blocks are visited in the order they end (no backtracking) - helps with working a block at a time
            • *
            • Points are visited in linear order within a block (easy to use for analysis)
            • @@ -80,10 +80,15 @@ public NodeType getNextType() { /** Used to recognize special nodes */ public enum NodeType { + /** Not any of the parallel types */ NORMAL, + /**{@link BlockStartNode} starting a parallel block */ PARALLEL_START, + /**{@link BlockEndNode} ending a parallel block */ PARALLEL_END, + /**{@link BlockStartNode} starting a branch of a parallel */ PARALLEL_BRANCH_START, + /**{@link BlockEndNode} ending a parallel block... or last executed nodes */ PARALLEL_BRANCH_END, } @@ -262,7 +267,7 @@ ArrayDeque convertForksToBlockStarts(ArrayDeque parall * This works by walking back to construct the tree of parallel blocks covering all heads back to the Least Common Ancestor of all heads * (the top parallel block). One by one, as branches join, we remove them from the list of live pieces and replace with their common ancestor. * - *

              The core algorithm is simple in theory but the many cases render the implementation quite complex. In gist: + *

              The core algorithm is simple in theory but the many cases render the implementation quite complex. In gist: *

                *
              • We track FlowPieces, which are Forks (where branches merge) and FlowSegments (where there's a unforked sequence of nodes)
              • *
              • A map of FlowNode to its containing FlowPiece is created
              • @@ -282,7 +287,7 @@ ArrayDeque convertForksToBlockStarts(ArrayDeque parall *
              • Each time we merge a branch in, we need to remove an entry from enclosing blocks & live pieces
              • *
              * - *

              There are some assumptions you need to know about to understand why this works: + *

              There are some assumptions you need to know about to understand why this works: *

                *
              • None of the pieces have multiple parents, since we only look at enclosing blocks (only be a BlockEndNodes for a parallel block have multipel parents)
              • *
              • No cycles exist in the graph
              • diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelledChunkFinder.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelledChunkFinder.java index aeae680d..11806abf 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelledChunkFinder.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LabelledChunkFinder.java @@ -9,8 +9,10 @@ import javax.annotation.Nonnull; /** - * Basically finds stages. *Technically* it's any block of nodes. - * Creates chunks whenever you have a labelled linear block (not a parallel branch). + * Splits a flow execution into {@link FlowChunk}s whenever you have a label. + * This works for labelled blocks or single-step labels. + * + * Useful for collecting stages and parallel branches. * @author Sam Van Oort */ public class LabelledChunkFinder implements ChunkFinder { @@ -19,13 +21,15 @@ public boolean isStartInsideChunk() { return true; } + /** Start is anywhere with a {@link LabelAction} */ @Override public boolean isChunkStart(@Nonnull FlowNode current, @CheckForNull FlowNode previous) { LabelAction la = current.getAction(LabelAction.class); return la != null; } - /** End is where you have a label marker before it... 
or */ + /** End is where the previous node is a chunk start + * or this is a {@link BlockEndNode} whose {@link BlockStartNode} has a label action */ @Override public boolean isChunkEnd(@Nonnull FlowNode current, @CheckForNull FlowNode previous) { if (previous == null) { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java index 11bc7363..ba9f43d9 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java @@ -38,15 +38,15 @@ * Extension of {@link LinearScanner} that skips nested blocks at the current level, useful for finding enclosing blocks. * ONLY use this with nodes inside the flow graph, never the last node of a completed flow (it will jump over the whole flow). * - *

                This is useful where you only care about {@link FlowNode}s that precede this one or are part of an enclosing scope (within a Block). + *

                This is useful where you only care about {@link FlowNode}s that precede this one or are part of an enclosing scope (within a Block). * - *

                Specifically: + *

                Specifically: *

                  *
                • Where a {@link BlockEndNode} is encountered, the scanner will jump to the {@link BlockStartNode} and go to its first parent.
                • *
                • The only case where you visit branches of a parallel block is if you begin inside it.
                • *
                * - *

                Specific use cases: + *

                Specific use cases: *

                  *
                • Finding out the executor workspace used to run a FlowNode
                • *
                • Finding the start of the parallel block enclosing the current node
                • diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java index e8b34284..93c2e26e 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java @@ -35,12 +35,12 @@ /** * Scans through the flow graph in strictly linear fashion, visiting only the first branch in parallel blocks. * - *

                  Iteration order: depth-ONLY, meaning we walk through parents and only follow the first parent of each {@link FlowNode} + *

                  Iteration order: depth-ONLY, meaning we walk through parents and only follow the first parent of each {@link FlowNode} * This means that where are parallel branches, we will only visit a partial set of {@link FlowNode}s in the directed acyclic graph. * - *

                  Use case: we don't care about parallel branches or know they don't exist, we just want to walk through the top-level blocks. + *

                  Use case: we don't care about parallel branches or know they don't exist, we just want to walk through the top-level blocks. * - *

                  This is the fastest and simplest way to walk a flow, because you only care about a single node at a time. + *

                  This is the fastest and simplest way to walk a flow, because you only care about a single node at a time. * @author Sam Van Oort */ @NotThreadSafe diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java index a2c658b8..03ad6cd7 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java @@ -23,6 +23,8 @@ */ package org.jenkinsci.plugins.workflow.graphanalysis; +import org.jenkinsci.plugins.workflow.graph.BlockEndNode; +import org.jenkinsci.plugins.workflow.graph.BlockStartNode; import org.jenkinsci.plugins.workflow.graph.FlowNode; import javax.annotation.CheckForNull; @@ -30,10 +32,10 @@ /** * This visitor's callbacks are invoked as we walk through a pipeline flow graph, and it splits it into chunks. - *

                  A {@link ForkScanner#visitSimpleChunks(SimpleChunkVisitor, ChunkFinder)} creates these FlowChunks using a {@link ChunkFinder} to define the chunk boundaries. + *

                  A {@link ForkScanner#visitSimpleChunks(SimpleChunkVisitor, ChunkFinder)} creates these FlowChunks using a {@link ChunkFinder} to define the chunk boundaries. * - *

                  Implementations get to decide how to use & handle chunks. - *

                  At a minimum they should handle:

                  + *

                  Implementations get to decide how to use & handle chunks. + *

                  At a minimum they should handle:

                  *
                    *
                  • Unbalanced numbers of chunk start/end calls (for incomplete flows)
                  • *
                  • A chunk end with no beginning (runs to start of flow, or never began)
                  • @@ -43,9 +45,9 @@ *
                  * * Important implementation note: multiple callbacks can be invoked for a single node depending on its type. - *

                  For example, we may capture parallels as chunks. + *

                  For example, we may capture parallels as chunks. * - *

                  Callbacks Reporting on chunk/parallel information:

                  + *

                  Callbacks Reporting on chunk/parallel information:

                  *
                    *
                  • {@link #chunkStart(FlowNode, FlowNode, ForkScanner)} is called on the current node when we hit start of a boundary (inclusive)
                  • *
                  • {@link #chunkEnd(FlowNode, FlowNode, ForkScanner)} is called when we hit end of a boundary (inclusive)
                  • @@ -58,18 +60,23 @@ public interface SimpleChunkVisitor { /** - * Called when hitting the start of a chunk + * Called when hitting the start of a chunk. * @param startNode First node in chunk (marker), included in node - * @param beforeBlock First node before chunk + * @param beforeBlock First node before chunk (null if none exist) * @param scanner Forkscanner used (for state tracking) */ void chunkStart(@Nonnull FlowNode startNode, @CheckForNull FlowNode beforeBlock, @Nonnull ForkScanner scanner); - /** Called when hitting the end of a block */ + /** + * Called when hitting the end of a chunk. + * @param endNode Last node in chunk + * @param afterChunk Node after chunk (null if we are on the last node) + * @param scanner Forkscanner used (for state tracking) + */ void chunkEnd(@Nonnull FlowNode endNode, @CheckForNull FlowNode afterChunk, @Nonnull ForkScanner scanner); /** - * Notifies that we've hit the start of a parallel block (the point where it branches out) + * Notifies that we've hit the start of a parallel block (the point where it branches out). 
* @param parallelStartNode The {@link org.jenkinsci.plugins.workflow.graph.BlockStartNode} beginning it, next will be branches * @param branchNode {@link org.jenkinsci.plugins.workflow.graph.BlockStartNode} for one of the branches (it will be labelled) * @param scanner ForkScanner used @@ -78,8 +85,8 @@ public interface SimpleChunkVisitor { /** * Notifies that we've seen the end of a parallel block - * @param parallelStartNode First node of parallel (BlockStartNode before the branches) - * @param parallelEndNode Last node of parallel (BlockEndNode) + * @param parallelStartNode First node of parallel ({@link BlockStartNode} before the branches) + * @param parallelEndNode Last node of parallel ({@link BlockEndNode}) * @param scanner */ void parallelEnd(@Nonnull FlowNode parallelStartNode, @Nonnull FlowNode parallelEndNode, @Nonnull ForkScanner scanner); @@ -94,7 +101,7 @@ public interface SimpleChunkVisitor { /** * Hit the end start of a parallel branch - *

                    May not be invoked if we're inside an in-progress parallel + *

                    May not be invoked if we're inside an in-progress parallel * @param parallelStartNode First node of parallel (BlockStartNode before the branches) * @param branchEndNode Final node of the branch (may be BlockEndNode if done, otherwise just the last one executed) * @param scanner diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardChunkVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardChunkVisitor.java index 3b11ef16..2e9495bf 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardChunkVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/StandardChunkVisitor.java @@ -6,10 +6,11 @@ import javax.annotation.Nonnull; /** - * Simple handler for linear chunks (basic stages, etc), designed to be extended - * Note: does not handle parallels or nesting - * Extend {@link #handleChunkDone(MemoryFlowChunk)} to gather up final chunks - * Extend {@link #atomNode(FlowNode, FlowNode, FlowNode, ForkScanner)} to gather data about nodes in a chunk + * Simple handler for linear {@link FlowChunk}s (basic stages, etc), and designed to be extended. + * Note: only tracks one chunk at a time, so it won't handle nesting or parallels. + * Specifically, it will reset with each chunk start. + * Extend {@link #handleChunkDone(MemoryFlowChunk)} to gather up final chunks. + * Extend {@link #atomNode(FlowNode, FlowNode, FlowNode, ForkScanner)} to gather data about nodes in a chunk. * @author Sam Van Oort */ public class StandardChunkVisitor implements SimpleChunkVisitor { @@ -17,8 +18,8 @@ public class StandardChunkVisitor implements SimpleChunkVisitor { protected MemoryFlowChunk chunk = new MemoryFlowChunk(); - /** Override me to do something once the chunk is finished - * Note: the chunk will be mutated directly, so you need to copy it if you want to do something + /** Override me to do something once the chunk is finished (such as add it to a list). 
+ * Note: the chunk will be mutated directly, so you need to copy it if you want to do something. */ protected void handleChunkDone(@Nonnull MemoryFlowChunk chunk) { // NO-OP initially diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/package-info.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/package-info.java index eaf625d0..63374edc 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/package-info.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/package-info.java @@ -1,11 +1,11 @@ /** * Provides a library of methods to work with and analyze the graph of {@link org.jenkinsci.plugins.workflow.graph.FlowNode}s produced from a pipeline execution. * - *

                    The core APIs are described in the javadocs for {@link org.jenkinsci.plugins.workflow.graphanalysis.AbstractFlowScanner} + *

                    The core APIs are described in the javadocs for {@link org.jenkinsci.plugins.workflow.graphanalysis.AbstractFlowScanner} * But in general it provides for iteration through the Directed Acyclic Graph (DAG) of a flow, filtering, search for matches, and * visiting all nodes via internal iteration. * - *

                    Static methods and a few implementations are also provided in {@link org.jenkinsci.plugins.workflow.graphanalysis.FlowScanningUtils}. + *

                    Static methods and a few implementations are also provided in {@link org.jenkinsci.plugins.workflow.graphanalysis.FlowScanningUtils}. */ package org.jenkinsci.plugins.workflow.graphanalysis; \ No newline at end of file From 8ed03eecfc05ebe007659e22bb58c13e9ac9873b Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 24 Aug 2016 09:15:50 -0400 Subject: [PATCH 095/104] Fix more JavaDocs formatting due to DocLint being a jerk. --- .../graphanalysis/AbstractFlowScanner.java | 18 +++++++++--------- .../graphanalysis/SimpleChunkVisitor.java | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java index 8afd9e08..f14efb08 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java @@ -47,13 +47,13 @@ * *

                    This provides 6 base APIs to use, in decreasing expressiveness and increasing genericity: *

                      - * - {@link #findFirstMatch(Collection, Collection, Predicate)}: find the first FlowNode matching predicate condition. - * - {@link #filteredNodes(Collection, Collection, Predicate)}: return the collection of FlowNodes matching the predicate. - * - {@link #visitAll(Collection, FlowNodeVisitor)}: given a {@link FlowNodeVisitor}, invoke {@link FlowNodeVisitor#visit(FlowNode)} on each node and halt when it returns false. - * - Iterator: Each FlowScanner can be used as an Iterator for FlowNode-by-FlowNode walking, - * after you invoke {@link #setup(Collection, Collection)} to initialize it for iteration. - * - {@link Filterator}: If initialized as an Iterator, each FlowScanner can provide a filtered view from the current point in time. - * - Iterable: for syntactic sugar, FlowScanners implement Iterable to allow use in for-each loops once initialized. + *
                    • {@link #findFirstMatch(Collection, Collection, Predicate)}: find the first FlowNode matching predicate condition.
                    • + *
                    • {@link #filteredNodes(Collection, Collection, Predicate)}: return the collection of FlowNodes matching the predicate.
                    • + *
                    • {@link #visitAll(Collection, FlowNodeVisitor)}: given a {@link FlowNodeVisitor}, invoke {@link FlowNodeVisitor#visit(FlowNode)} on each node and halt when it returns false.
                    • + *
                    • Iterator: Each FlowScanner can be used as an Iterator for FlowNode-by-FlowNode walking, + * after you invoke {@link #setup(Collection, Collection)} to initialize it for iteration.
                    • + *
                    • {@link Filterator}: If initialized as an Iterator, each FlowScanner can provide a filtered view from the current point in time.
                    • + *
                    • Iterable: for syntactic sugar, FlowScanners implement Iterable to allow use in for-each loops once initialized.
                    • *
                    * *

                    All APIs visit the parent nodes, walking backward from heads(inclusive) until they they hit {@link #myBlackList} nodes (exclusive) or reach the end of the DAG. @@ -61,7 +61,7 @@ * Multiple blackList nodes are helpful for putting separate bounds on walking different parallel branches. * *

                    Key Points: - *

                  • There are many helper methods offering syntactic sugar for the above APIs in common use cases (simpler method signatures).
                  • + *
                    • There are many helper methods offering syntactic sugar for the above APIs in common use cases (simpler method signatures).
                    • *
                    • Each implementation provides its own iteration order (described in its javadoc comments), * but it is generally unsafe to rely on parallel branches being visited in a specific order.
                    • *
                    • Implementations may visit some or all points in the DAG, this should be called out in the class's javadoc comments
                    • @@ -72,7 +72,7 @@ *
                    • This state can be used to construct more advanced analyses.
                    • *
                    • FlowScanners can be reinitialized and reused repeatedly: avoids the overheads of creating scanners repeatedly.
                    • *
                    • Allows for caching to be added inside a FlowScanner if desired, but caching is only useful when reused.
                    • - *
                    + *
                * *

                Suggested uses: *

                  diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java index 03ad6cd7..24278e6a 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/SimpleChunkVisitor.java @@ -34,7 +34,7 @@ * This visitor's callbacks are invoked as we walk through a pipeline flow graph, and it splits it into chunks. *

                  A {@link ForkScanner#visitSimpleChunks(SimpleChunkVisitor, ChunkFinder)} creates these FlowChunks using a {@link ChunkFinder} to define the chunk boundaries. * - *

                  Implementations get to decide how to use & handle chunks. + *

                  Implementations get to decide how to use and handle chunks. *

                  At a minimum they should handle:

                  *
                    *
                  • Unbalanced numbers of chunk start/end calls (for incomplete flows)
                  • From 80b7ad4f3022d3e21ad187b9f1635d16cfa65d92 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 24 Aug 2016 09:17:10 -0400 Subject: [PATCH 096/104] Fix yet more doclint whining --- .../java/org/jenkinsci/plugins/workflow/pickles/Pickle.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/pickles/Pickle.java b/src/main/java/org/jenkinsci/plugins/workflow/pickles/Pickle.java index 48c4362b..dfcbb7fe 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/pickles/Pickle.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/pickles/Pickle.java @@ -50,7 +50,7 @@ public ListenableFuture rehydrate() { * An implementation should return quickly and avoid acquiring locks in this method itself (as opposed to the future). * {@link ListenableFuture#cancel} should be implemented if possible. * @param owner an owner handle on which you may, for example, call {@link FlowExecutionOwner#getListener} - * @return a future on which {@link ListenableFuture#cancel} might be called; also polite to override {@link ListenableFuture#toString} for diagnostics + * @return a future on which {@link ListenableFuture#cancel(boolean)} might be called; also polite to override {@link ListenableFuture#toString()} for diagnostics */ public ListenableFuture rehydrate(FlowExecutionOwner owner) { if (Util.isOverridden(Pickle.class, getClass(), "rehydrate")) { From cf4d5acb6d3c5f0bf0c269c12d3c53d904c7a258 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 24 Aug 2016 09:29:44 -0400 Subject: [PATCH 097/104] OMG doclint really --- .../plugins/workflow/graphanalysis/AbstractFlowScanner.java | 5 +++-- .../java/org/jenkinsci/plugins/workflow/pickles/Pickle.java | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java index 
f14efb08..d090aab5 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java @@ -68,11 +68,12 @@ *
                  • FlowScanners are NOT thread safe, for performance reasons and because it is too hard to guarantee.
                  • *
                  • Many fields and methods are protected: this is intentional to allow building upon the implementations for more complex analyses.
                  • *
                  • Each FlowScanner stores state internally for several reasons:
                  • - *
                      + *
                      • *
                      • This state can be used to construct more advanced analyses.
                      • *
                      • FlowScanners can be reinitialized and reused repeatedly: avoids the overheads of creating scanners repeatedly.
                      • *
                      • Allows for caching to be added inside a FlowScanner if desired, but caching is only useful when reused.
                      • - *
                    + *
                  + *
                * *

                Suggested uses: *

                  diff --git a/src/main/java/org/jenkinsci/plugins/workflow/pickles/Pickle.java b/src/main/java/org/jenkinsci/plugins/workflow/pickles/Pickle.java index dfcbb7fe..86a2e8f9 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/pickles/Pickle.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/pickles/Pickle.java @@ -50,7 +50,7 @@ public ListenableFuture rehydrate() { * An implementation should return quickly and avoid acquiring locks in this method itself (as opposed to the future). * {@link ListenableFuture#cancel} should be implemented if possible. * @param owner an owner handle on which you may, for example, call {@link FlowExecutionOwner#getListener} - * @return a future on which {@link ListenableFuture#cancel(boolean)} might be called; also polite to override {@link ListenableFuture#toString()} for diagnostics + * @return a future on which {@link ListenableFuture#cancel(boolean)} might be called; also polite to override the toString method for diagnostics */ public ListenableFuture rehydrate(FlowExecutionOwner owner) { if (Util.isOverridden(Pickle.class, getClass(), "rehydrate")) { From 4f1575cd125636a6feac72ab2cd679aec145abbf Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 24 Aug 2016 10:44:49 -0400 Subject: [PATCH 098/104] Address review comments, mostly javadocs --- .../graphanalysis/AbstractFlowScanner.java | 14 ++++++++------ .../graphanalysis/DepthFirstScanner.java | 2 ++ .../graphanalysis/FilteratorImpl.java | 2 ++ .../graphanalysis/FlowScanningUtils.java | 12 +----------- .../workflow/graphanalysis/ForkScanner.java | 14 ++++++++------ .../LinearBlockHoppingScanner.java | 4 +++- .../workflow/graphanalysis/LinearScanner.java | 3 +++ .../graphanalysis/ForkScannerTest.java | 19 ++++++++++++++++++- 8 files changed, 45 insertions(+), 25 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java 
index 5022e985..d80c5127 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/AbstractFlowScanner.java @@ -30,6 +30,7 @@ import javax.annotation.CheckForNull; import javax.annotation.Nonnull; +import javax.annotation.concurrent.NotThreadSafe; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -38,7 +39,6 @@ import java.util.LinkedHashSet; import java.util.List; import java.util.NoSuchElementException; -import java.util.Set; /** * Core APIs and base logic for FlowScanners that extract information from a pipeline execution. @@ -78,7 +78,7 @@ *
                    *
                  • Implement a {@link FlowNodeVisitor} that collects metrics from each FlowNode visited, and call visitAll to extract the data.
                  • *
                  • Find all flownodes of a given type (ex: stages), using {@link #filteredNodes(Collection, Collection, Predicate)}
                  • - *
                  • Find the first node with an Error before a specific node
                  • + *
                  • Find the first node with an {@link org.jenkinsci.plugins.workflow.actions.ErrorAction} before a specific node
                  • *
                  • Scan through all nodes *just* within a block *
                      *
                    • Use the {@link org.jenkinsci.plugins.workflow.graph.BlockEndNode} as the head
                    • @@ -88,6 +88,7 @@ * * @author Sam Van Oort */ +@NotThreadSafe public abstract class AbstractFlowScanner implements Iterable , Filterator { protected FlowNode myCurrent; @@ -235,15 +236,16 @@ public Filterator filter(@Nonnull Predicate filterCondition) * Find the first FlowNode within the iteration order matching a given condition * Includes null-checking on arguments to allow directly calling with unchecked inputs (simplifies use). * @param heads Head nodes to start walking from - * @param endNodes + * @param blackListNodes Nodes that are never visited, search stops here (bound is exclusive). + * If you want to create an inclusive bound, just use a node's parents. * @param matchCondition Predicate to match when we've successfully found a given node type * @return First matching node, or null if no matches found */ @CheckForNull public FlowNode findFirstMatch(@CheckForNull Collection heads, - @CheckForNull Collection endNodes, + @CheckForNull Collection blackListNodes, Predicate matchCondition) { - if (!setup(heads, endNodes)) { + if (!setup(heads, blackListNodes)) { return null; } @@ -283,7 +285,7 @@ public FlowNode findFirstMatch(@CheckForNull FlowExecution exec, @Nonnull Predic * Includes null-checking on arguments to allow directly calling with unchecked inputs (simplifies use). * @param heads Nodes to start iterating backward from by visiting their parents. * @param blackList Nodes we may not visit or walk beyond. - * @param matchCondition Predicate that must be met for nodes to be included in output. + * @param matchCondition Predicate that must be met for nodes to be included in output. Input is always non-null. * @return List of flownodes matching the predicate. 
*/ @Nonnull diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java index bfaeb43f..0703c8d0 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java @@ -28,6 +28,7 @@ import org.jenkinsci.plugins.workflow.graph.FlowNode; import javax.annotation.Nonnull; +import javax.annotation.concurrent.NotThreadSafe; import java.util.ArrayDeque; import java.util.Collection; import java.util.HashSet; @@ -42,6 +43,7 @@ *

                      The behavior is analogous to {@link org.jenkinsci.plugins.workflow.graph.FlowGraphWalker} but faster. * @author Sam Van Oort */ +@NotThreadSafe public class DepthFirstScanner extends AbstractFlowScanner { protected ArrayDeque queue; diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java index 1dd769b8..272e3e47 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FilteratorImpl.java @@ -27,11 +27,13 @@ import com.google.common.base.Predicate; import javax.annotation.Nonnull; +import javax.annotation.concurrent.NotThreadSafe; import java.util.Iterator; /** Filters an iterator against a match predicate by wrapping an iterator * @author Sam Van Oort */ +@NotThreadSafe class FilteratorImpl implements Filterator { private boolean hasNext = false; private T nextVal = null; diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java index 4c42ffbd..efd7a3d7 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScanningUtils.java @@ -32,15 +32,10 @@ import org.jenkinsci.plugins.workflow.actions.LogAction; import org.jenkinsci.plugins.workflow.actions.StageAction; import org.jenkinsci.plugins.workflow.actions.WorkspaceAction; -import org.jenkinsci.plugins.workflow.graph.BlockEndNode; import org.jenkinsci.plugins.workflow.graph.BlockStartNode; import org.jenkinsci.plugins.workflow.graph.FlowNode; import javax.annotation.Nonnull; -import java.util.ArrayDeque; -import java.util.Collection; -import java.util.Collections; -import java.util.List; /** * Library of common functionality when analyzing/walking flow 
graphs @@ -57,7 +52,7 @@ private FlowScanningUtils() {} * @return Predicate that will match when FlowNode has the action given */ @Nonnull - public static Predicate nodeHasActionPredicate(@Nonnull final Class actionClass) { + public static Predicate hasActionPredicate(@Nonnull final Class actionClass) { return new Predicate() { @Override public boolean apply(FlowNode input) { @@ -67,11 +62,6 @@ public boolean apply(FlowNode input) { } // Default predicates, which may be used for common conditions - public static final Predicate MATCH_HAS_LABEL = nodeHasActionPredicate(LabelAction.class); - public static final Predicate MATCH_IS_STAGE = nodeHasActionPredicate(StageAction.class); - public static final Predicate MATCH_HAS_WORKSPACE = nodeHasActionPredicate(WorkspaceAction.class); - public static final Predicate MATCH_HAS_ERROR = nodeHasActionPredicate(ErrorAction.class); - public static final Predicate MATCH_HAS_LOG = nodeHasActionPredicate(LogAction.class); public static final Predicate MATCH_BLOCK_START = (Predicate)Predicates.instanceOf(BlockStartNode.class); /** diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index c52abc22..9708121b 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ -31,6 +31,7 @@ import javax.annotation.CheckForNull; import javax.annotation.Nonnull; +import javax.annotation.concurrent.NotThreadSafe; import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Collection; @@ -61,6 +62,7 @@ * * @author Sam Van Oort */ +@NotThreadSafe public class ForkScanner extends AbstractFlowScanner { // Last element in stack is end of myCurrent parallel start, first is myCurrent start @@ -82,7 +84,8 @@ protected void reset() { myNext = null; } - /** If true, we are walking from the flow end node and have a 
complete view of the flow */ + /** If true, we are walking from the flow end node and have a complete view of the flow + * Needed because there are implications when not walking from a finished flow (blocks without a {@link BlockEndNode})*/ public boolean isWalkingFromFinish() { return walkingFromFinish; } @@ -264,7 +267,7 @@ ArrayDeque leastCommonAncestor(@Nonnull Set heads) while (itIterator.hasNext()) { Filterator blockStartIterator = itIterator.next(); - FlowPiece myPiece = pieceIterator.next(); + FlowPiece myPiece = pieceIterator.next(); //Safe because we always remove/add with both iterators at once // Welp we hit the end of a branch if (!blockStartIterator.hasNext()) { @@ -316,7 +319,6 @@ ArrayDeque leastCommonAncestor(@Nonnull Set heads) @Override protected void setHeads(@Nonnull Collection heads) { if (heads.size() > 1) { - //throw new IllegalArgumentException("ForkedFlowScanner can't handle multiple head nodes yet"); parallelBlockStartStack = leastCommonAncestor(new LinkedHashSet(heads)); currentParallelStart = parallelBlockStartStack.pop(); currentParallelStartNode = currentParallelStart.forkStart; @@ -415,8 +417,8 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection // First we look at the parents of the current node if present List parents = current.getParents(); - if (parents == null || parents.size() == 0) { - // welp done with this node, guess we consult the queue? + if (parents.isEmpty()) { + // welp, we're done with this node, guess we consult the queue? } else if (parents.size() == 1) { FlowNode p = parents.get(0); if (p == currentParallelStartNode) { @@ -436,7 +438,7 @@ protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection return possibleOutput; } } else { - throw new IllegalStateException("Found a FlowNode with multiple parents that isn't the end of a block! "+ this.myCurrent.toString()); + throw new IllegalStateException("Found a FlowNode with multiple parents that isn't the end of a block! 
"+ this.myCurrent); } if (currentParallelStart != null && currentParallelStart.unvisited.size() > 0) { diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java index 9f385045..db18c70f 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearBlockHoppingScanner.java @@ -30,6 +30,7 @@ import javax.annotation.CheckForNull; import javax.annotation.Nonnull; +import javax.annotation.concurrent.NotThreadSafe; import java.util.Collection; import java.util.List; @@ -49,11 +50,12 @@ *

                        *
                      • Finding out the executor workspace used to run a FlowNode
                      • *
                      • Finding the start of the parallel block enclosing the current node
                      • - *
                      • Locating the label applying to a given FlowNode (if any)
                      • + *
                      • Locating the label applying to a given FlowNode (if any) if using labelled blocks
                      • *
                      * * @author Sam Van Oort */ +@NotThreadSafe public class LinearBlockHoppingScanner extends LinearScanner { @Override diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java index 8f273501..e9f432bc 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java @@ -27,6 +27,7 @@ import org.jenkinsci.plugins.workflow.graph.FlowNode; import javax.annotation.Nonnull; +import javax.annotation.concurrent.NotThreadSafe; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -40,8 +41,10 @@ *

                      Use case: we don't care about parallel branches or know they don't exist, we just want to walk through the top-level blocks. * *

                      This is the fastest & simplest way to walk a flow, because you only care about a single node at a time. + * Nuance: where there are multiple parent nodes (in a parallel block), and one is blacklisted, we'll find the first non-blacklisted one. * @author Sam Van Oort */ +@NotThreadSafe public class LinearScanner extends AbstractFlowScanner { @Override diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java index de232f58..29dba9cf 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java @@ -24,6 +24,8 @@ package org.jenkinsci.plugins.workflow.graphanalysis; +import com.google.common.base.Predicate; +import com.google.common.base.Predicates; import org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition; import org.jenkinsci.plugins.workflow.flow.FlowExecution; import org.jenkinsci.plugins.workflow.graph.BlockStartNode; @@ -43,6 +45,7 @@ import java.util.Collection; import java.util.HashMap; import java.util.LinkedHashSet; +import java.util.List; import java.util.Set; // Slightly dirty but it removes a ton of FlowTestUtils.* class qualifiers @@ -131,6 +134,7 @@ public void setUp() throws Exception { "}\n" + "steps['2'] = {\n" + " echo '2a'\n" + + " echo '2b'\n" + " def nested = [:]\n" + " nested['2-1'] = {\n" + " echo 'do 2-1'\n" + @@ -139,7 +143,6 @@ public void setUp() throws Exception { " sleep 1\n" + " echo '2 section 2'\n" + " }\n" + - " echo '2b'\n" + " parallel nested\n" + "}\n" + "parallel steps\n" + @@ -280,6 +283,20 @@ public void testFlowSegmentSplit() throws Exception { Assert.assertEquals(sideBranch, nodeMap.get(exec.getNode("7"))); } + @Test + public void testEmptyParallel() throws Exception { + WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "EmptyParallel"); + job.setDefinition(new 
CpsFlowDefinition( + "parallel 'empty1': {}, 'empty2':{} \n" + + "echo 'done' " + )); + WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); + ForkScanner scan = new ForkScanner(); + + List outputs = scan.filteredNodes(b.getExecution().getCurrentHeads(), (Predicate) Predicates.alwaysTrue()); + Assert.assertEquals(9, outputs.size()); + } + /** Reference the flow graphs in {@link #SIMPLE_PARALLEL_RUN} and {@link #NESTED_PARALLEL_RUN} */ @Test public void testLeastCommonAncestor() throws Exception { From 4cc9206fbab463b8c82df6e8d5a7e3c9730fb9d5 Mon Sep 17 00:00:00 2001 From: Jesse Glick Date: Mon, 22 Aug 2016 15:20:04 -0400 Subject: [PATCH 099/104] Javadoc error. --- .../java/org/jenkinsci/plugins/workflow/pickles/Pickle.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/pickles/Pickle.java b/src/main/java/org/jenkinsci/plugins/workflow/pickles/Pickle.java index 86a2e8f9..26219fd2 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/pickles/Pickle.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/pickles/Pickle.java @@ -50,7 +50,7 @@ public ListenableFuture rehydrate() { * An implementation should return quickly and avoid acquiring locks in this method itself (as opposed to the future). * {@link ListenableFuture#cancel} should be implemented if possible. 
* @param owner an owner handle on which you may, for example, call {@link FlowExecutionOwner#getListener} - * @return a future on which {@link ListenableFuture#cancel(boolean)} might be called; also polite to override the toString method for diagnostics + * @return a future on which {@link ListenableFuture#cancel(boolean)} might be called; also polite to override the {@link Object#toString} method for diagnostics */ public ListenableFuture rehydrate(FlowExecutionOwner owner) { if (Util.isOverridden(Pickle.class, getClass(), "rehydrate")) { From 2bcfd8b850be3eabb7b5029952272f88a7d035a0 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 24 Aug 2016 10:57:53 -0400 Subject: [PATCH 100/104] Solved a javadoc hiccup --- .../jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java index 2492c662..a7326812 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java @@ -40,7 +40,7 @@ * *

                      Use case: we don't care about parallel branches or know they don't exist, we just want to walk through the top-level blocks. * - *

                      This is the fastest & simplest way to walk a flow, because you only care about a single node at a time. + *

                      This is the fastest and simplest way to walk a flow, because you only care about a single node at a time. * Nuance: where there are multiple parent nodes (in a parallel block), and one is blacklisted, we'll find the first non-blacklisted one. * @author Sam Van Oort */ From 34483c34a4de9182b075d10ad916cd3de34ddf16 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 24 Aug 2016 11:00:22 -0400 Subject: [PATCH 101/104] Remove accidentally commit merge files --- .../graphanalysis/ForkScanner.java.orig | 596 ------------------ .../graphanalysis/LinearScanner.java.orig | 85 --- .../graphanalysis/ForkScannerTest.java.orig | 489 -------------- 3 files changed, 1170 deletions(-) delete mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java.orig delete mode 100644 src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java.orig delete mode 100644 src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java.orig diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java.orig b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java.orig deleted file mode 100644 index fa603263..00000000 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java.orig +++ /dev/null @@ -1,596 +0,0 @@ -/* - * The MIT License - * - * Copyright (c) 2016, CloudBees, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ - -package org.jenkinsci.plugins.workflow.graphanalysis; - -import com.google.common.base.Predicate; -import com.google.common.base.Predicates; -import org.jenkinsci.plugins.workflow.actions.ThreadNameAction; -import org.jenkinsci.plugins.workflow.graph.BlockEndNode; -import org.jenkinsci.plugins.workflow.graph.BlockStartNode; -import org.jenkinsci.plugins.workflow.graph.FlowEndNode; -import org.jenkinsci.plugins.workflow.graph.FlowNode; - -import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; -import javax.annotation.concurrent.NotThreadSafe; -import java.util.ArrayDeque; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.ListIterator; -import java.util.Set; - -/** - * Scanner that will scan down all forks when we hit parallel blocks before continuing, but generally runs in linear order - *

                      Think of it as the opposite of {@link DepthFirstScanner}. - * - *

                      This is a fairly efficient way to visit all FlowNodes, and provides three useful guarantees: - *

                        - *
                      • Every FlowNode is visited, and visited EXACTLY ONCE (not true for LinearScanner)
                      • - *
                      • All parallel branches are visited before we move past the parallel block (not true for DepthFirstScanner)
                      • - *
                      • For EVERY block, the BlockEndNode is visited before the BlockStartNode (not true for DepthFirstScanner, with parallels)
                      • - *
                      - * - *

                      The big advantages of this approach: - *

                        - *
                      • Blocks are visited in the order they end (no backtracking) - helps with working a block at a time
                      • - *
                      • Points are visited in linear order within a block (easy to use for analysis)
                      • - *
                      • Minimal state information needed
                      • - *
                      • Branch information is available for use here
                      • - *
                      - * - * @author Sam Van Oort - */ -@NotThreadSafe -public class ForkScanner extends AbstractFlowScanner { - - @CheckForNull - public NodeType getCurrentType() { - return currentType; - } - - @CheckForNull - public NodeType getNextType() { - return nextType; - } - - /** Used to recognize special nodes */ - public enum NodeType { - /** Not any of the parallel types */ - NORMAL, - /**{@link BlockStartNode} starting a parallel block */ - PARALLEL_START, - /**{@link BlockEndNode} ending a parallel block */ - PARALLEL_END, - /**{@link BlockStartNode} starting a branch of a parallel */ - PARALLEL_BRANCH_START, - /**{@link BlockEndNode} ending a parallel block... or last executed nodes */ - PARALLEL_BRANCH_END, - } - - // Last element in stack is end of myCurrent parallel start, first is myCurrent start - ArrayDeque parallelBlockStartStack = new ArrayDeque(); - - /** FlowNode that will terminate the myCurrent parallel block */ - FlowNode currentParallelStartNode = null; - - ParallelBlockStart currentParallelStart = null; - - private boolean walkingFromFinish = false; - - NodeType currentType = null; - NodeType nextType = null; - - public ForkScanner() { - - } - - public ForkScanner(@Nonnull Collection heads) { - this.setup(heads); - } - - public ForkScanner(@Nonnull Collection heads, @Nonnull Collection blackList) { - this.setup(heads, blackList); - } - - @Override - protected void reset() { - parallelBlockStartStack.clear(); - currentParallelStart = null; - currentParallelStartNode = null; - myCurrent = null; - myNext = null; - } - -<<<<<<< HEAD - /** If true, we are walking from the flow end node and have a complete view of the flow - * Needed because there are implications when not walking from a finished flow (blocks without a {@link BlockEndNode})*/ -======= - // A bit of a dirty hack, but it works around the fact that we need trivial access to classes from workflow-cps - // For this and only this test. 
So, we load them from a context that is aware of them. - // Ex: workflow-cps can automatically set this correctly. Not perfectly graceful but it works. - private static Predicate parallelStartPredicate = Predicates.alwaysFalse(); - - // Invoke this passing a test against the ParallelStep conditions - public static void setParallelStartPredicate(@Nonnull Predicate pred) { - parallelStartPredicate = pred; - } - - // Needed because the *next* node might be a parallel start if we start in middle and we don't know it - public static boolean isParallelStart(@CheckForNull FlowNode f) { - return parallelStartPredicate.apply(f); - } - - // Needed because the *next* node might be a parallel end and we don't know it from a normal one - public static boolean isParallelEnd(@CheckForNull FlowNode f) { - return f != null && f instanceof BlockEndNode && (f.getParents().size()>1 || isParallelStart(((BlockEndNode) f).getStartNode())); - } - - /** If true, we are walking from the flow end node and have a complete view of the flow */ ->>>>>>> block-scanning-APIs - public boolean isWalkingFromFinish() { - return walkingFromFinish; - } - - /** Tracks state for parallel blocks, so we can ensure all are visited and know the branch starting point */ - static class ParallelBlockStart { - BlockStartNode forkStart; // This is the node with child branches - ArrayDeque unvisited = new ArrayDeque(); // Remaining branches of this that we have have not visited yet - - ParallelBlockStart(BlockStartNode forkStart) { - this.forkStart = forkStart; - } - - /** Strictly for internal use in the least common ancestor problem */ - ParallelBlockStart() {} - } - - interface FlowPiece { // Mostly a marker - /** If true, this is not a fork and has no following forks */ - boolean isLeaf(); - } - - /** Linear (no parallels) run of FLowNodes */ - // TODO see if this can be replaced with a FlowChunk acting as a container class for a list of FlowNodes - static class FlowSegment implements FlowPiece { - ArrayList 
visited = new ArrayList(); - FlowPiece after; - boolean isLeaf = true; - - @Override - public boolean isLeaf() { - return isLeaf; - } - - /** - * We have discovered a forking node intersecting our FlowSegment in the middle or meeting at the end - * Now we need to split the flow, or pull out the fork point and make both branches follow it - * @param nodeMapping Mapping of BlockStartNodes to flowpieces (forks or segments) - * @param joinPoint Node where the branches intersect/meet (fork point) - * @param joiningBranch Flow piece that is joining this - * @throws IllegalStateException When you try to split a segment on a node that it doesn't contain, or invalid graph structure - * @return Recreated fork - */ - Fork split(@Nonnull HashMap nodeMapping, @Nonnull BlockStartNode joinPoint, @Nonnull FlowPiece joiningBranch) { - int index = visited.lastIndexOf(joinPoint); // Fork will be closer to end, so this is better than indexOf - Fork newFork = new Fork(joinPoint); - - if (index < 0) { - throw new IllegalStateException("Tried to split a segment where the node doesn't exist in this segment"); - } else if (index == this.visited.size()-1) { // We forked just off the most recent node - newFork.following.add(this); - newFork.following.add(joiningBranch); - this.visited.remove(index); - } else if (index == 0) { - throw new IllegalStateException("We have a cyclic graph or heads that are not separate branches!"); - } else { // Splitting at some midpoint within the segment, everything before becomes part of the following - // Execute the split: create a new fork at the fork point, and shuffle the part of the flow after it - // to a new segment and add that to the fork. 
- - FlowSegment newSegment = new FlowSegment(); - newSegment.after = this.after; - newSegment.visited.addAll(this.visited.subList(0, index)); - newFork.following.add(newSegment); - newFork.following.add(joiningBranch); - this.after = newFork; - this.isLeaf = false; - - // Remove the part before the fork point - this.visited.subList(0, index+1).clear(); - for (FlowNode n : newSegment.visited) { - nodeMapping.put(n, newSegment); - } - } - nodeMapping.put(joinPoint, newFork); - return newFork; - } - - public void add(FlowNode f) { - this.visited.add(f); - } - } - - /** Internal class used for constructing the LeastCommonAncestor structure */ - // TODO see if this can be replaced with a FlowChunk acting as a container class for parallels - // I.E. ParallelMemoryFlowChunk or similar - static class Fork extends ParallelBlockStart implements FlowPiece { - List following = new ArrayList(); - - @Override - public boolean isLeaf() { - return false; - } - - public Fork(BlockStartNode forkNode) { - this.forkStart = forkNode; - } - } - - /** Does a conversion of the fork container class to a set of block starts */ - ArrayDeque convertForksToBlockStarts(ArrayDeque parallelForks) { - // Walk through and convert forks to parallel block starts, and find heads that point to them - ArrayDeque output = new ArrayDeque(); - for (Fork f : parallelForks) { - // Do processing to assign heads to flowsegments - ParallelBlockStart start = new ParallelBlockStart(); - start.forkStart = f.forkStart; - start.unvisited = new ArrayDeque(); - - // Add the nodes to the parallel starts here - for (FlowPiece fp : f.following) { - if (fp.isLeaf()) { // Forks are never leaves - start.unvisited.add(((FlowSegment)fp).visited.get(0)); - } - } - output.add(start); - } - return output; - } - - /** - * Create the necessary information about parallel blocks in order to provide flowscanning from inside incomplete parallel branches - * This works by walking back to construct the tree of parallel blocks covering 
all heads back to the Least Common Ancestor of all heads - * (the top parallel block). One by one, as branches join, we remove them from the list of live pieces and replace with their common ancestor. - * - *

                      The core algorithm is simple in theory but the many cases render the implementation quite complex. In gist: - *

                        - *
                      • We track FlowPieces, which are Forks (where branches merge) and FlowSegments (where there's a unforked sequence of nodes)
                      • - *
                      • A map of FlowNode to its containing FlowPiece is created
                      • - *
                      • For each head we start a new FlowSegment and create an iterator of all enclosing blocks (all we need for this)
                      • - *
                      • We do a series of passes through all iterators looking to see if the parent of any given piece maps to an existing FlowPiece
                      • - *
                          - *
                        1. Where there are no mappings, we add another node to the FlowSegment
                        2. - *
                        3. Where an existing piece exists, if it's a Fork, we add the current piece on as a new branch
                        4. - *
                        5. Where an existing piece exists if it's a FlowSegment, we create a fork: - *
                          • If we're joining at the most recent point, create a Fork with both branches following it, and replace that item's ForkSegment in the piece list with a Fork
                          • - *
                          • If joining midway through, split the segment and create a fork as needed
                          - *
                        6. - *
                        7. When two pieces join together, we remove one from the list
                        8. - *
                        9. When we're down to a single piece, we have the full ancestry & we're done
                        10. - *
                        11. When we're down to a single piece, all heads have merged and we're done
                        12. - *
                        - *
                      • Each time we merge a branch in, we need to remove an entry from enclosing blocks & live pieces
                      • - *
                      - * - *

                      There are some assumptions you need to know about to understand why this works: - *

                        - *
                      • None of the pieces have multiple parents, since we only look at enclosing blocks (only be a BlockEndNodes for a parallel block have multipel parents)
                      • - *
                      • No cycles exist in the graph
                      • - *
                      • Flow graphs are correctly constructed
                      • - *
                      • Heads are all separate branches
                      • - *
                      - * - * @param heads - */ - ArrayDeque leastCommonAncestor(@Nonnull Set heads) { - HashMap branches = new HashMap(); - ArrayList> iterators = new ArrayList>(); - ArrayList livePieces = new ArrayList(); - - ArrayDeque parallelForks = new ArrayDeque(); // Tracks the discovered forks in order of encounter - - for (FlowNode f : heads) { - iterators.add(FlowScanningUtils.fetchEnclosingBlocks(f)); - FlowSegment b = new FlowSegment(); - b.add(f); - livePieces.add(b); - branches.put(f, b); - } - - // Walk through, merging flownodes one-by-one until everything has merged to one ancestor - while (iterators.size() > 1) { - ListIterator> itIterator = iterators.listIterator(); - ListIterator pieceIterator = livePieces.listIterator(); - - while (itIterator.hasNext()) { - Filterator blockStartIterator = itIterator.next(); - FlowPiece myPiece = pieceIterator.next(); //Safe because we always remove/add with both iterators at once - - // Welp we hit the end of a branch - if (!blockStartIterator.hasNext()) { - pieceIterator.remove(); - itIterator.remove(); - continue; - } - - FlowNode nextBlockStart = blockStartIterator.next(); - - // Look for cases where two branches merge together - FlowPiece existingPiece = branches.get(nextBlockStart); - if (existingPiece == null && myPiece instanceof FlowSegment) { // No merge, just add to segment - ((FlowSegment) myPiece).add(nextBlockStart); - branches.put(nextBlockStart, myPiece); - } else if (existingPiece == null && myPiece instanceof Fork) { // No merge, we had a fork. Start a segment preceding the fork - FlowSegment newSegment = new FlowSegment(); - newSegment.isLeaf = false; - newSegment.add(nextBlockStart); - newSegment.after = myPiece; - pieceIterator.remove(); - pieceIterator.add(newSegment); - branches.put(nextBlockStart, newSegment); - } else if (existingPiece != null) { // Always not null. 
We're merging into another thing, we're going to elliminate a branch - if (existingPiece instanceof Fork) { - ((Fork) existingPiece).following.add(myPiece); - } else { // Split a flow segment so it forks against this one - Fork f = ((FlowSegment) existingPiece).split(branches, (BlockStartNode)nextBlockStart, myPiece); - // If we split the existing segment at its end, we created a fork replacing its latest node - // Thus we must replace the piece with the fork ahead of it - if (f.following.contains(existingPiece) ) { - int headIndex = livePieces.indexOf(existingPiece); - livePieces.set(headIndex, f); - } - parallelForks.add(f); - } - - // Merging removes the piece & its iterator from heads - itIterator.remove(); - pieceIterator.remove(); - } - } - } - - // If we hit issues with the ordering of blocks by depth, apply a sorting to the parallels by depth - return convertForksToBlockStarts(parallelForks); - } - - @Override - protected void setHeads(@Nonnull Collection heads) { - if (heads.size() > 1) { - parallelBlockStartStack = leastCommonAncestor(new LinkedHashSet(heads)); - currentParallelStart = parallelBlockStartStack.pop(); - currentParallelStartNode = currentParallelStart.forkStart; - myCurrent = currentParallelStart.unvisited.pop(); - myNext = myCurrent; - nextType = NodeType.PARALLEL_BRANCH_END; - walkingFromFinish = false; - } else { - FlowNode f = heads.iterator().next(); - walkingFromFinish = f instanceof FlowEndNode; - myCurrent = f; - myNext = f; - if (isParallelEnd(f)) { - nextType = NodeType.PARALLEL_END; - } else if (isParallelStart(f)) { - nextType = NodeType.PARALLEL_START; - } else { - nextType = NodeType.NORMAL; - } - } - currentType = null; - } - - /** - * Return the node that begins the current parallel head - * @return The FlowNode that marks current parallel start - */ - @CheckForNull - public FlowNode getCurrentParallelStartNode() { - return currentParallelStartNode; - } - - - /** Return number of levels deep we are in parallel blocks */ - 
public int getParallelDepth() { - return (currentParallelStart == null) ? 0 : 1 + parallelBlockStartStack.size(); - } - - /** - * Invoked when we start entering a parallel block (walking from head of the flow, so we see the block end first) - * @param endNode Node where parents merge (final end node for the parallel block) - * @param parents Parent nodes that end here - * @return FlowNode myNext node to visit - */ - FlowNode hitParallelEnd(BlockEndNode endNode, List parents, Collection blackList) { - BlockStartNode start = endNode.getStartNode(); - - ArrayDeque branches = new ArrayDeque(); - for (FlowNode f : parents) { - if (!blackList.contains(f)) { - branches.add(f); - } - } - - FlowNode output = null; - if (branches.size() > 0) { // Push another branch start - ParallelBlockStart parallelBlockStart = new ParallelBlockStart(start); - output = branches.pop(); - parallelBlockStart.unvisited = branches; - - if (currentParallelStart != null) { - parallelBlockStartStack.push(currentParallelStart); - } - currentParallelStart = parallelBlockStart; - currentParallelStartNode = start; - } - return output; - } - - /** - * Invoked when we complete parallel block, walking from the head (so encountered after the end) - * @return FlowNode if we're the last node - */ - FlowNode hitParallelStart() { - FlowNode output = null; - - if (currentParallelStart != null) { - if (currentParallelStart.unvisited.isEmpty()) { // Strip off a completed branch - // We finished a nested set of parallel branches, visit the head and move up a level - output = currentParallelStartNode; - - if (parallelBlockStartStack.size() > 0) { - // Finished a nested parallel block, move up a level - currentParallelStart = parallelBlockStartStack.pop(); - currentParallelStartNode = currentParallelStart.forkStart; - } else { // At the top level, not inside any parallel block - currentParallelStart = null; - currentParallelStartNode = null; - } - } - } else { - throw new IllegalStateException("Hit a BlockStartNode 
with multiple children, and no record of the start!"); - } - - // Handle cases where the BlockStartNode for the parallel block is blackListed - return (output != null && !myBlackList.contains(output)) ? output : null; - } - - @Override - public FlowNode next() { - currentType = nextType; - FlowNode output = super.next(); - return output; - } - - @Override - protected FlowNode next(@Nonnull FlowNode current, @Nonnull Collection blackList) { - FlowNode output = null; - - // First we look at the parents of the current node if present - List parents = current.getParents(); - if (parents.isEmpty()) { - // welp, we're done with this node, guess we consult the queue? - } else if (parents.size() == 1) { - FlowNode p = parents.get(0); - if (p == currentParallelStartNode) { - // Terminating a parallel scan - FlowNode temp = hitParallelStart(); - if (temp != null) { // Start node for current parallel block now that it is done - nextType = NodeType.PARALLEL_START; - return temp; - } - } else if (!blackList.contains(p)) { - if (p instanceof BlockStartNode && p.getAction(ThreadNameAction.class) != null) { - nextType = NodeType.PARALLEL_BRANCH_START; - } else if (ForkScanner.isParallelEnd(p)) { - nextType = NodeType.PARALLEL_END; - } else { - nextType = NodeType.NORMAL; - } - return p; - } - } else if (current instanceof BlockEndNode && parents.size() > 1) { - // We must be a BlockEndNode that begins this - BlockEndNode end = ((BlockEndNode) current); - FlowNode possibleOutput = hitParallelEnd(end, parents, blackList); // What if output is block but other branches aren't? - if (possibleOutput != null) { - nextType = NodeType.PARALLEL_BRANCH_END; - return possibleOutput; - } - } else { - throw new IllegalStateException("Found a FlowNode with multiple parents that isn't the end of a block! 
"+ this.myCurrent); - } - - if (currentParallelStart != null && currentParallelStart.unvisited.size() > 0) { - output = currentParallelStart.unvisited.pop(); - nextType = NodeType.PARALLEL_BRANCH_END; - } - if (output == null) { - nextType = null; - } - return output; - } - - public static void visitSimpleChunks(@Nonnull Collection heads, @Nonnull Collection blacklist, @Nonnull SimpleChunkVisitor visitor, @Nonnull ChunkFinder finder) { - ForkScanner scanner = new ForkScanner(); - scanner.setup(heads, blacklist); - scanner.visitSimpleChunks(visitor, finder); - } - - public static void visitSimpleChunks(@Nonnull Collection heads, @Nonnull SimpleChunkVisitor visitor, @Nonnull ChunkFinder finder) { - ForkScanner scanner = new ForkScanner(); - scanner.setup(heads); - scanner.visitSimpleChunks(visitor, finder); - } - - /** Walk through flows */ - public void visitSimpleChunks(@Nonnull SimpleChunkVisitor visitor, @Nonnull ChunkFinder finder) { - FlowNode prev = null; - if (finder.isStartInsideChunk() && hasNext()) { - visitor.chunkEnd(this.myNext, null, this); - } - while(hasNext()) { - prev = (myCurrent != myNext) ? 
myCurrent : null; - FlowNode f = next(); - - boolean boundary = false; - if (finder.isChunkStart(myCurrent, prev)) { - visitor.chunkStart(myCurrent, myNext, this); - boundary = true; - } - if (finder.isChunkEnd(myCurrent, prev)) { - visitor.chunkEnd(myCurrent, prev, this); - boundary = true; - } - if (!boundary) { - visitor.atomNode(myNext, f, prev, this); - } - - // Trigger on parallels - switch (currentType) { - case NORMAL: - break; - case PARALLEL_END: - visitor.parallelEnd(this.currentParallelStartNode, myCurrent, this); - break; - case PARALLEL_START: - visitor.parallelStart(myCurrent, prev, this); - break; - case PARALLEL_BRANCH_END: - visitor.parallelBranchEnd(this.currentParallelStartNode, myCurrent, this); - break; - case PARALLEL_BRANCH_START: - // Needed because once we hit the start of the last branch, the next node is our currentParallelStart - FlowNode parallelStart = (nextType == NodeType.PARALLEL_START) ? myNext : this.currentParallelStartNode; - visitor.parallelBranchStart(parallelStart, myCurrent, this); - break; - default: - throw new IllegalStateException("Unhandled type for current node"); - } - } - } - -} diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java.orig b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java.orig deleted file mode 100644 index bb1c4233..00000000 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/LinearScanner.java.orig +++ /dev/null @@ -1,85 +0,0 @@ -/* - * The MIT License - * - * Copyright (c) 2016, CloudBees, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -package org.jenkinsci.plugins.workflow.graphanalysis; - -import org.jenkinsci.plugins.workflow.graph.FlowNode; - -import javax.annotation.Nonnull; -import javax.annotation.concurrent.NotThreadSafe; -import java.util.Collection; -import java.util.Collections; -import java.util.List; - -/** - * Scans through the flow graph in strictly linear fashion, visiting only the first branch in parallel blocks. - * - *

                      Iteration order: depth-ONLY, meaning we walk through parents and only follow the first parent of each {@link FlowNode} - * This means that where are parallel branches, we will only visit a partial set of {@link FlowNode}s in the directed acyclic graph. - * - *

                      Use case: we don't care about parallel branches or know they don't exist, we just want to walk through the top-level blocks. - * -<<<<<<< HEAD - *

                      This is the fastest & simplest way to walk a flow, because you only care about a single node at a time. - * Nuance: where there are multiple parent nodes (in a parallel block), and one is blacklisted, we'll find the first non-blacklisted one. - * @author Sam Van Oort -======= - *

                      This is the fastest and simplest way to walk a flow, because you only care about a single node at a time. - * @author Sam Van Oort ->>>>>>> block-scanning-APIs - */ -@NotThreadSafe -public class LinearScanner extends AbstractFlowScanner { - - @Override - protected void reset() { - this.myCurrent = null; - this.myNext = null; - this.myBlackList = Collections.EMPTY_SET; - } - - @Override - protected void setHeads(@Nonnull Collection heads) { - if (heads.size() > 0) { - this.myCurrent = heads.iterator().next(); - this.myNext = this.myCurrent; - } - } - - @Override - protected FlowNode next(FlowNode current, @Nonnull Collection blackList) { - if (current == null) { - return null; - } - List parents = current.getParents(); - if (parents != null && parents.size() > 0) { - for (FlowNode f : parents) { - if (!blackList.contains(f)) { - return f; - } - } - } - return null; - } -} diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java.orig b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java.orig deleted file mode 100644 index fcbace79..00000000 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java.orig +++ /dev/null @@ -1,489 +0,0 @@ -/* - * The MIT License - * - * Copyright (c) 2016, CloudBees, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -package org.jenkinsci.plugins.workflow.graphanalysis; - -import com.google.common.base.Predicate; -import com.google.common.base.Predicates; -<<<<<<< HEAD -======= -import com.google.common.collect.Iterables; -import com.google.common.collect.Lists; ->>>>>>> block-scanning-APIs -import org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition; -import org.jenkinsci.plugins.workflow.cps.nodes.StepStartNode; -import org.jenkinsci.plugins.workflow.cps.steps.ParallelStep; -import org.jenkinsci.plugins.workflow.flow.FlowExecution; -import org.jenkinsci.plugins.workflow.graph.BlockStartNode; -import org.jenkinsci.plugins.workflow.graph.FlowGraphWalker; -import org.jenkinsci.plugins.workflow.graph.FlowNode; -import org.jenkinsci.plugins.workflow.job.WorkflowJob; -import org.jenkinsci.plugins.workflow.job.WorkflowRun; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.jvnet.hudson.test.BuildWatcher; -import org.jvnet.hudson.test.JenkinsRule; -import org.junit.Assert; - -import java.util.ArrayDeque; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Set; - -// Slightly dirty but it removes a ton of FlowTestUtils.* class qualifiers -import static org.jenkinsci.plugins.workflow.graphanalysis.FlowTestUtils.*; - -/** - * Tests for internals of ForkScanner - */ 
-public class ForkScannerTest { - @ClassRule - public static BuildWatcher buildWatcher = new BuildWatcher(); - - @Rule - public JenkinsRule r = new JenkinsRule(); - - public static Predicate predicateForCallEntryType(final TestVisitor.CallType type) { - return new Predicate() { - TestVisitor.CallType myType = type; - @Override - public boolean apply(TestVisitor.CallEntry input) { - return input.type != null && input.type == myType; - } - }; - } - - /** Flow structure (ID - type) - 2 - FlowStartNode (BlockStartNode) - 3 - Echostep - 4 - ParallelStep (StepStartNode) (start branches) - 6 - ParallelStep (StepStartNode) (start branch 1), ParallelLabelAction with branchname=1 - 7 - ParallelStep (StepStartNode) (start branch 2), ParallelLabelAction with branchname=2 - 8 - EchoStep, (branch 1) parent=6 - 9 - StepEndNode, (end branch 1) startId=6, parentId=8 - 10 - EchoStep, (branch 2) parentId=7 - 11 - EchoStep, (branch 2) parentId = 10 - 12 - StepEndNode (end branch 2) startId=7 parentId=11, - 13 - StepEndNode (close branches), parentIds = 9,12, startId=4 - 14 - EchoStep - 15 - FlowEndNode (BlockEndNode) - */ - WorkflowRun SIMPLE_PARALLEL_RUN; - - /** Parallel nested in parallel (ID-type) - * 2 - FlowStartNode (BlockStartNode) - * 3 - Echostep - * 4 - ParallelStep (stepstartnode) - * 6 - ParallelStep (StepStartNode) (start branch 1), ParallelLabelAction with branchname=1 - * 7 - ParallelStep (StepStartNode) (start branch 2), ParallelLabelAction with branchname=2 - * 8 - EchoStep (branch #1) - parentId=6 - * 9 - StepEndNode (end branch #1) - startId=6 - * 10 - EchoStep - parentId=7 - * 11 - EchoStep - * 12 - ParallelStep (StepStartNode) - start inner parallel - * 14 - ParallelStep (StepStartNode) (start branch 2-1), parentId=12, ParallelLabellAction with branchName=2-1 - * 15 - ParallelStep (StepStartNode) (start branch 2-2), parentId=12, ParallelLabelAction with branchName=2-2 - * 16 - Echo (Branch2-1), parentId=14 - * 17 - StepEndNode (end branch 2-1), parentId=16, 
startId=14 - * 18 - SleepStep (branch 2-2) parentId=15 - * 19 - EchoStep (branch 2-2) - * 20 - StepEndNode (end branch 2-2), startId=15 - * 21 - StepEndNode (end inner parallel ), parentIds=17,20, startId=12 - * 22 - StepEndNode (end parallel #2), parent=21, startId=7 - * 23 - StepEndNode (end outer parallel), parentIds=9,22, startId=4 - * 24 - Echo - * 25 - FlowEndNode - */ - WorkflowRun NESTED_PARALLEL_RUN; - - @Before - public void setUp() throws Exception { - r.jenkins.getInjector().injectMembers(this); - - WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "SimpleParallel"); - job.setDefinition(new CpsFlowDefinition( - "echo 'first'\n" + - "def steps = [:]\n" + - "steps['1'] = {\n" + - " echo 'do 1 stuff'\n" + - "}\n" + - "steps['2'] = {\n" + - " echo '2a'\n" + - " echo '2b'\n" + - "}\n" + - "parallel steps\n" + - "echo 'final'" - )); - WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); - this.SIMPLE_PARALLEL_RUN = b; - - job = r.jenkins.createProject(WorkflowJob.class, "NestedParallel"); - job.setDefinition(new CpsFlowDefinition( - "echo 'first'\n" + - "def steps = [:]\n" + - "steps['1'] = {\n" + - " echo 'do 1 stuff'\n" + - "}\n" + - "steps['2'] = {\n" + - " echo '2a'\n" + - " echo '2b'\n" + - " def nested = [:]\n" + - " nested['2-1'] = {\n" + - " echo 'do 2-1'\n" + - " } \n" + - " nested['2-2'] = {\n" + - " sleep 1\n" + - " echo '2 section 2'\n" + - " }\n" + - " parallel nested\n" + - "}\n" + - "parallel steps\n" + - "echo 'final'" - )); - b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); - this.NESTED_PARALLEL_RUN = b; - } - - public static Predicate PARALLEL_START_PREDICATE = new Predicate() { - @Override - public boolean apply(FlowNode input) { - return input != null && input instanceof StepStartNode && (((StepStartNode) input).getDescriptor().getClass() == ParallelStep.DescriptorImpl.class); - } - }; - - @Test - public void testForkedScanner() throws Exception { - FlowExecution exec = SIMPLE_PARALLEL_RUN.getExecution(); - 
Collection heads = SIMPLE_PARALLEL_RUN.getExecution().getCurrentHeads(); - - // Initial case - ForkScanner scanner = new ForkScanner(); - scanner.setup(heads, null); - ForkScanner.setParallelStartPredicate(PARALLEL_START_PREDICATE); - Assert.assertNull(scanner.currentParallelStart); - Assert.assertNull(scanner.currentParallelStartNode); - Assert.assertNotNull(scanner.parallelBlockStartStack); - Assert.assertEquals(0, scanner.parallelBlockStartStack.size()); - Assert.assertTrue(scanner.isWalkingFromFinish()); - - // Fork case - scanner.setup(exec.getNode("13")); - Assert.assertFalse(scanner.isWalkingFromFinish()); - Assert.assertEquals(null, scanner.currentType); - Assert.assertEquals(ForkScanner.NodeType.PARALLEL_END, scanner.nextType); - Assert.assertEquals("13", scanner.next().getId()); - Assert.assertNotNull(scanner.parallelBlockStartStack); - Assert.assertEquals(0, scanner.parallelBlockStartStack.size()); - Assert.assertEquals(exec.getNode("4"), scanner.currentParallelStartNode); - - ForkScanner.ParallelBlockStart start = scanner.currentParallelStart; - Assert.assertEquals(1, start.unvisited.size()); - Assert.assertEquals(exec.getNode("4"), start.forkStart); - - Assert.assertEquals(exec.getNode("9"), scanner.next()); - Assert.assertEquals(ForkScanner.NodeType.PARALLEL_BRANCH_END, scanner.getCurrentType()); - Assert.assertEquals(ForkScanner.NodeType.NORMAL, scanner.getNextType()); - Assert.assertEquals(exec.getNode("8"), scanner.next()); - Assert.assertEquals(ForkScanner.NodeType.NORMAL, scanner.getCurrentType()); - Assert.assertEquals(ForkScanner.NodeType.PARALLEL_BRANCH_START, scanner.getNextType()); - Assert.assertEquals(exec.getNode("6"), scanner.next()); - Assert.assertEquals(ForkScanner.NodeType.PARALLEL_BRANCH_START, scanner.getCurrentType()); - Assert.assertEquals(ForkScanner.NodeType.PARALLEL_BRANCH_END, scanner.getNextType()); - FlowNode f = scanner.next(); - Assert.assertEquals(ForkScanner.NodeType.PARALLEL_BRANCH_END, scanner.getCurrentType()); - 
Assert.assertEquals(ForkScanner.NodeType.NORMAL, scanner.getNextType()); - Assert.assertEquals(exec.getNode("12"), f); - - // Now we test the least common ancestor bits - } - - /** Reference the flow graphs in {@link #SIMPLE_PARALLEL_RUN} and {@link #NESTED_PARALLEL_RUN} */ - @Test - public void testFlowSegmentSplit() throws Exception { - FlowExecution exec = SIMPLE_PARALLEL_RUN.getExecution(); - - /** Flow structure (ID - type) - 2 - FlowStartNode (BlockStartNode) - 3 - Echostep - 4 - ParallelStep (StepStartNode) (start branches) - 6 - ParallelStep (StepStartNode) (start branch 1), ParallelLabelAction with branchname=1 - 7 - ParallelStep (StepStartNode) (start branch 2), ParallelLabelAction with branchname=2 - 8 - EchoStep, (branch 1) parent=6 - 9 - StepEndNode, (end branch 1) startId=6, parentId=8 - 10 - EchoStep, (branch 2) parentId=7 - 11 - EchoStep, (branch 2) parentId = 10 - 12 - StepEndNode (end branch 2) startId=7 parentId=11, - 13 - StepEndNode (close branches), parentIds = 9,12, startId=4 - 14 - EchoStep - 15 - FlowEndNode (BlockEndNode) - */ - - HashMap nodeMap = new HashMap(); - ForkScanner.FlowSegment mainBranch = new ForkScanner.FlowSegment(); - ForkScanner.FlowSegment sideBranch = new ForkScanner.FlowSegment(); - FlowNode BRANCH1_END = exec.getNode("9"); - FlowNode BRANCH2_END = exec.getNode("12"); - FlowNode START_PARALLEL = exec.getNode("4"); - - // Branch 1, we're going to run one flownode beyond the start of the parallel branch and then split - mainBranch.add(BRANCH1_END); - mainBranch.add(exec.getNode("8")); - mainBranch.add(exec.getNode("6")); - mainBranch.add(exec.getNode("4")); - mainBranch.add(exec.getNode("3")); // FlowNode beyond the fork point - for (FlowNode f : mainBranch.visited) { - nodeMap.put(f, mainBranch); - } - assertNodeOrder("Visited nodes", mainBranch.visited, 9, 8, 6, 4, 3); - - // Branch 2 - sideBranch.add(BRANCH2_END); - sideBranch.add(exec.getNode("11")); - sideBranch.add(exec.getNode("10")); - 
sideBranch.add(exec.getNode("7")); - for (FlowNode f : sideBranch.visited) { - nodeMap.put(f, sideBranch); - } - assertNodeOrder("Visited nodes", sideBranch.visited, 12, 11, 10, 7); - - ForkScanner.Fork forked = mainBranch.split(nodeMap, (BlockStartNode)exec.getNode("4"), sideBranch); - ForkScanner.FlowSegment splitSegment = (ForkScanner.FlowSegment)nodeMap.get(BRANCH1_END); // New branch - Assert.assertNull(splitSegment.after); - assertNodeOrder("Branch 1 split after fork", splitSegment.visited, 9, 8, 6); - - // Just the single node before the fork - Assert.assertEquals(forked, mainBranch.after); - assertNodeOrder("Head of flow, pre-fork", mainBranch.visited, 3); - - // Fork point - Assert.assertEquals(forked, nodeMap.get(START_PARALLEL)); - ForkScanner.FlowPiece[] follows = {splitSegment, sideBranch}; - Assert.assertArrayEquals(follows, forked.following.toArray()); - - // Branch 2 - Assert.assertEquals(sideBranch, nodeMap.get(BRANCH2_END)); - assertNodeOrder("Branch 2", sideBranch.visited, 12, 11, 10, 7); - - // Test me where splitting right at a fork point, where we should have a fork with and main branch shoudl become following - // Along with side branch (branch2) - nodeMap.clear(); - mainBranch = new ForkScanner.FlowSegment(); - sideBranch = new ForkScanner.FlowSegment(); - mainBranch.visited.add(exec.getNode("6")); - mainBranch.visited.add(START_PARALLEL); - sideBranch.visited.add(exec.getNode("7")); - for (FlowNode f : mainBranch.visited) { - nodeMap.put(f, mainBranch); - } - nodeMap.put(exec.getNode("7"), sideBranch); - - forked = mainBranch.split(nodeMap, (BlockStartNode)exec.getNode("4"), sideBranch); - follows = new ForkScanner.FlowSegment[2]; - follows[0] = mainBranch; - follows[1] = sideBranch; - Assert.assertArrayEquals(follows, forked.following.toArray()); - assertNodeOrder("Branch1", mainBranch.visited, 6); - Assert.assertNull(mainBranch.after); - assertNodeOrder("Branch2", sideBranch.visited, 7); - Assert.assertNull(sideBranch.after); - 
Assert.assertEquals(forked, nodeMap.get(START_PARALLEL)); - Assert.assertEquals(mainBranch, nodeMap.get(exec.getNode("6"))); - Assert.assertEquals(sideBranch, nodeMap.get(exec.getNode("7"))); - } - - @Test - public void testEmptyParallel() throws Exception { - WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "EmptyParallel"); - job.setDefinition(new CpsFlowDefinition( - "parallel 'empty1': {}, 'empty2':{} \n" + - "echo 'done' " - )); - WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); - ForkScanner scan = new ForkScanner(); - - List outputs = scan.filteredNodes(b.getExecution().getCurrentHeads(), (Predicate) Predicates.alwaysTrue()); - Assert.assertEquals(9, outputs.size()); - } - - /** Reference the flow graphs in {@link #SIMPLE_PARALLEL_RUN} and {@link #NESTED_PARALLEL_RUN} */ - @Test - public void testLeastCommonAncestor() throws Exception { - FlowExecution exec = SIMPLE_PARALLEL_RUN.getExecution(); - - ForkScanner scan = new ForkScanner(); - // Starts at the ends of the parallel branches - Set heads = new LinkedHashSet(Arrays.asList(exec.getNode("12"), exec.getNode("9"))); - ArrayDeque starts = scan.leastCommonAncestor(heads); - Assert.assertEquals(1, starts.size()); - - ForkScanner.ParallelBlockStart start = starts.peek(); - Assert.assertEquals(2, start.unvisited.size()); - Assert.assertEquals(exec.getNode("4"), start.forkStart); - Assert.assertArrayEquals(heads.toArray(), start.unvisited.toArray()); - - /** Now we do the same with nested run */ - exec = NESTED_PARALLEL_RUN.getExecution(); - heads = new LinkedHashSet(Arrays.asList(exec.getNode("9"), exec.getNode("17"), exec.getNode("20"))); - - // Problem: we get a parallel start with the same flowsegment in the following for more than one parallel start - starts = scan.leastCommonAncestor(heads); - Assert.assertEquals(2, starts.size()); - ForkScanner.ParallelBlockStart inner = starts.getFirst(); - ForkScanner.ParallelBlockStart outer = starts.getLast(); - - Assert.assertEquals(2, 
inner.unvisited.size()); - Assert.assertEquals(exec.getNode("12"), inner.forkStart); - - Assert.assertEquals(1, outer.unvisited.size()); - Assert.assertEquals(exec.getNode("9"), outer.unvisited.peek()); - Assert.assertEquals(exec.getNode("4"), outer.forkStart); - } - - /** For nodes, see {@link #SIMPLE_PARALLEL_RUN} */ - @Test - public void testSimpleVisitor() throws Exception { - ForkScanner.setParallelStartPredicate(PARALLEL_START_PREDICATE); - FlowExecution exec = this.SIMPLE_PARALLEL_RUN.getExecution(); - ForkScanner f = new ForkScanner(); - f.setup(exec.getCurrentHeads()); - TestVisitor visitor = new TestVisitor(); - - f.visitSimpleChunks(visitor, new BlockChunkFinder()); - - // 13 calls for chunk/atoms, 6 for parallels - Assert.assertEquals(19, visitor.calls.size()); - - // End has nothing after it, just last node (15) - TestVisitor.CallEntry last = new TestVisitor.CallEntry(TestVisitor.CallType.CHUNK_END, 15, -1, -1, -1); - last.assertEquals(visitor.calls.get(0)); - - // Start has nothing before it, just the first node (2) - TestVisitor.CallEntry first = new TestVisitor.CallEntry(TestVisitor.CallType.CHUNK_START, 2, -1, -1, -1); - first.assertEquals(visitor.calls.get(18)); - - int chunkStartCount = Iterables.size(Iterables.filter(visitor.calls, predicateForCallEntryType(TestVisitor.CallType.CHUNK_START))); - int chunkEndCount = Iterables.size(Iterables.filter(visitor.calls, predicateForCallEntryType(TestVisitor.CallType.CHUNK_END))); - Assert.assertEquals(4, chunkStartCount); - Assert.assertEquals(4, chunkEndCount); - - // Verify the AtomNode calls are correct - List < TestVisitor.CallEntry > atomNodeCalls = Lists.newArrayList(Iterables.filter(visitor.calls, predicateForCallEntryType(TestVisitor.CallType.ATOM_NODE))); - Assert.assertEquals(5, atomNodeCalls.size()); - for (TestVisitor.CallEntry ce : atomNodeCalls) { - int beforeId = ce.ids[0]; - int atomNodeId = ce.ids[1]; - int afterId = ce.ids[2]; - int alwaysEmpty = ce.ids[3]; - Assert.assertTrue(ce+" 
beforeNodeId <= 0: "+beforeId, beforeId > 0); - Assert.assertTrue(ce + " atomNodeId <= 0: " + atomNodeId, atomNodeId > 0); - Assert.assertTrue(ce+" afterNodeId <= 0: "+afterId, afterId > 0); - Assert.assertEquals(-1, alwaysEmpty); - Assert.assertTrue(ce + "AtomNodeId >= afterNodeId", atomNodeId < afterId); - Assert.assertTrue(ce+ "beforeNodeId >= atomNodeId", beforeId < atomNodeId); - } - - - List parallelCalls = Lists.newArrayList(Iterables.filter(visitor.calls, new Predicate() { - @Override - public boolean apply(TestVisitor.CallEntry input) { - return input.type != null - && input.type != TestVisitor.CallType.ATOM_NODE - && input.type != TestVisitor.CallType.CHUNK_START - && input.type != TestVisitor.CallType.CHUNK_END; - } - })); - Assert.assertEquals(6, parallelCalls.size()); - // Start to end - new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_END, 4, 13).assertEquals(parallelCalls.get(0)); - - //Tests for parallel handling - // Start to end, in reverse order - - new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_BRANCH_END, 4, 9).assertEquals(parallelCalls.get(1)); - new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_BRANCH_START, 4, 6).assertEquals(parallelCalls.get(2)); - new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_BRANCH_END, 4, 12).assertEquals(parallelCalls.get(3)); - - new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_BRANCH_START, 4, 7).assertEquals(parallelCalls.get(4)); - new TestVisitor.CallEntry(TestVisitor.CallType.PARALLEL_START, 4, 7).assertEquals(parallelCalls.get(5)); - - } - - /** Checks for off-by one cases with multiple parallel */ - @Test - public void testTripleParallel() throws Exception { - WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "TripleParallel"); - job.setDefinition(new CpsFlowDefinition( - "stage 'test'\n"+ // Id 3, Id 2 before that has the FlowStartNode - "parallel 'unit':{\n" + // Id 4 starts parallel, Id 7 is the block start for the unit branch - " echo \"Unit testing...\"\n" 
+ // Id 10 - "},'integration':{\n" + // Id 11 is unit branch end, Id 8 is the branch start for integration branch - " echo \"Integration testing...\"\n" + // Id 12 - "}, 'ui':{\n" + // Id 13 in integration branch end, Id 9 is branch start for UI branch - " echo \"UI testing...\"\n" + // Id 14 - "}" // Node 15 is UI branch end node, Node 16 is Parallel End node, Node 17 is FlowWendNode - )); - WorkflowRun b = r.assertBuildStatusSuccess(job.scheduleBuild2(0)); - - ForkScanner.setParallelStartPredicate(PARALLEL_START_PREDICATE); - FlowExecution exec = b.getExecution(); - ForkScanner f = new ForkScanner(); - f.setup(exec.getCurrentHeads()); - TestVisitor visitor = new TestVisitor(); - f.visitSimpleChunks(visitor, new BlockChunkFinder()); - - ArrayList parallels = Lists.newArrayList(Iterables.filter(visitor.calls, - Predicates.or( - predicateForCallEntryType(TestVisitor.CallType.PARALLEL_BRANCH_START), - predicateForCallEntryType(TestVisitor.CallType.PARALLEL_BRANCH_END)) - ) - ); - Assert.assertEquals(6, parallels.size()); - - // Visiting from partially completed branches - // Verify we still get appropriate parallels callbacks for a branch end - // even if in-progress and no explicit end node - ArrayList ends = new ArrayList(); - ends.add(exec.getNode("11")); - ends.add(exec.getNode("12")); - ends.add(exec.getNode("14")); - visitor = new TestVisitor(); - f.setup(ends); - f.visitSimpleChunks(visitor, new BlockChunkFinder()); - parallels = Lists.newArrayList(Iterables.filter(visitor.calls, - Predicates.or( - predicateForCallEntryType(TestVisitor.CallType.PARALLEL_BRANCH_START), - predicateForCallEntryType(TestVisitor.CallType.PARALLEL_BRANCH_END)) - ) - ); - Assert.assertEquals(6, parallels.size()); - Assert.assertEquals(17, visitor.calls.size()); - } -} From 78e8d2c0826d172e123d303420dfef469b7ebf91 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 24 Aug 2016 11:07:34 -0400 Subject: [PATCH 102/104] Revert special versioning in prep for release --- pom.xml | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 6e6a16d9..19be6323 100644 --- a/pom.xml +++ b/pom.xml @@ -33,7 +33,7 @@ org.jenkins-ci.plugins.workflow workflow-api - 2.2-blockapis-SNAPSHOT + 2.2-SNAPSHOT hpi Pipeline: API https://wiki.jenkins-ci.org/display/JENKINS/Pipeline+API+Plugin From 8abdf34afc8b5d0de8ecd9f4cb3ff6be32d91ede Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 24 Aug 2016 13:04:45 -0400 Subject: [PATCH 103/104] Fix a missing DepthFirstScanner reset of field --- .../plugins/workflow/graphanalysis/DepthFirstScanner.java | 1 + 1 file changed, 1 insertion(+) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java index 59afbedc..7fd3e9ae 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/DepthFirstScanner.java @@ -58,6 +58,7 @@ protected void reset() { } this.visited.clear(); this.myCurrent = null; + this.myNext = null; } @Override From 3d3bf57aa8749f83af5104be677db00bd0213dd6 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Wed, 24 Aug 2016 21:40:46 -0400 Subject: [PATCH 104/104] Fix and thoroughly test against a bug with ForkScanner failing when handed just the branch start nodes for a parallel block --- .../workflow/graphanalysis/ForkScanner.java | 15 ++++++-- .../graphanalysis/FlowScannerTest.java | 3 +- .../graphanalysis/ForkScannerTest.java | 34 ++++++++++++++++++- 3 files changed, 46 insertions(+), 6 deletions(-) diff --git a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java index fee4fe13..f6c2dfbf 100644 --- a/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java +++ b/src/main/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScanner.java @@ 
-39,6 +39,8 @@ import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; import java.util.LinkedHashSet; import java.util.List; import java.util.ListIterator; @@ -303,15 +305,22 @@ ArrayDeque convertForksToBlockStarts(ArrayDeque parall * * @param heads */ - ArrayDeque leastCommonAncestor(@Nonnull Set heads) { + ArrayDeque leastCommonAncestor(@Nonnull final Set heads) { HashMap branches = new HashMap(); ArrayList> iterators = new ArrayList>(); ArrayList livePieces = new ArrayList(); ArrayDeque parallelForks = new ArrayDeque(); // Tracks the discovered forks in order of encounter + Predicate notAHead = new Predicate() { // Filter out pre-existing heads + Collection checkHeads = convertToFastCheckable(heads); + + @Override + public boolean apply(FlowNode input) { return !(checkHeads.contains(input)); } + }; + for (FlowNode f : heads) { - iterators.add(FlowScanningUtils.fetchEnclosingBlocks(f)); + iterators.add(FlowScanningUtils.fetchEnclosingBlocks(f).filter(notAHead)); // We can do this because Parallels always meet at a BlockStartNode FlowSegment b = new FlowSegment(); b.add(f); livePieces.add(b); @@ -349,7 +358,7 @@ ArrayDeque leastCommonAncestor(@Nonnull Set heads) pieceIterator.remove(); pieceIterator.add(newSegment); branches.put(nextBlockStart, newSegment); - } else if (existingPiece != null) { // Always not null. We're merging into another thing, we're going to elliminate a branch + } else if (existingPiece != null) { // Always not null. 
We're merging into another thing, we're going to eliminate a branch if (existingPiece instanceof Fork) { ((Fork) existingPiece).following.add(myPiece); } else { // Split a flow segment so it forks against this one diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java index fea95b61..9ddaed42 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/FlowScannerTest.java @@ -88,11 +88,10 @@ public void testAbstractScanner() throws Exception { AbstractFlowScanner linear = new LinearScanner(); // ## Bunch of tests for convertToFastCheckable ## - Collection coll = linear.convertToFastCheckable(null); Assert.assertEquals(Collections.EMPTY_SET, linear.convertToFastCheckable(null)); Assert.assertEquals(Collections.EMPTY_SET, linear.convertToFastCheckable(new ArrayList())); - coll = linear.convertToFastCheckable(Arrays.asList(intermediateNode)); + Collection coll = linear.convertToFastCheckable(Arrays.asList(intermediateNode)); Assert.assertTrue("Singleton set used for one element", coll instanceof AbstractSet); Assert.assertEquals(1, coll.size()); diff --git a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java index 99f2f7de..3a531418 100644 --- a/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java +++ b/src/test/java/org/jenkinsci/plugins/workflow/graphanalysis/ForkScannerTest.java @@ -51,6 +51,7 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.LinkedHashSet; import java.util.List; import java.util.Set; @@ -346,6 +347,22 @@ public void testLeastCommonAncestor() throws Exception { Assert.assertEquals(exec.getNode("4"), start.forkStart); 
Assert.assertArrayEquals(heads.toArray(), start.unvisited.toArray()); + // Ensure no issues with single start triggering least common ancestor + heads = new LinkedHashSet(Arrays.asList(exec.getNode("4"))); + scan.setup(heads); + Assert.assertNull(scan.currentParallelStart); + Assert.assertTrue(scan.parallelBlockStartStack == null || scan.parallelBlockStartStack.isEmpty()); + + // Empty fork + heads = new LinkedHashSet(Arrays.asList(exec.getNode("6"), exec.getNode("7"))); + starts = scan.leastCommonAncestor(heads); + Assert.assertEquals(1, starts.size()); + ForkScanner.ParallelBlockStart pbs = starts.pop(); + Assert.assertEquals(exec.getNode("4"), pbs.forkStart); + Assert.assertEquals(2, pbs.unvisited.size()); + Assert.assertTrue(pbs.unvisited.contains(exec.getNode("6"))); + Assert.assertTrue(pbs.unvisited.contains(exec.getNode("7"))); + /** Now we do the same with nested run */ exec = NESTED_PARALLEL_RUN.getExecution(); heads = new LinkedHashSet(Arrays.asList(exec.getNode("9"), exec.getNode("17"), exec.getNode("20"))); @@ -362,6 +379,10 @@ public void testLeastCommonAncestor() throws Exception { Assert.assertEquals(1, outer.unvisited.size()); Assert.assertEquals(exec.getNode("9"), outer.unvisited.peek()); Assert.assertEquals(exec.getNode("4"), outer.forkStart); + + heads = new LinkedHashSet(Arrays.asList(exec.getNode("9"), exec.getNode("17"), exec.getNode("20"))); + starts = scan.leastCommonAncestor(heads); + Assert.assertEquals(2, starts.size()); } /** For nodes, see {@link #SIMPLE_PARALLEL_RUN} */ @@ -433,7 +454,7 @@ public boolean apply(TestVisitor.CallEntry input) { } - /** Checks for off-by one cases with multiple parallel */ + /** Checks for off-by one cases with multiple parallel, and with the leastCommonAncestor */ @Test public void testTripleParallel() throws Exception { WorkflowJob job = r.jenkins.createProject(WorkflowJob.class, "TripleParallel"); @@ -482,5 +503,16 @@ public void testTripleParallel() throws Exception { ); Assert.assertEquals(6, 
parallels.size()); Assert.assertEquals(17, visitor.calls.size()); + + // Test the least common ancestor implementation with triplicate + FlowNode[] branchHeads = {exec.getNode("7"), exec.getNode("8"), exec.getNode("9")}; + ArrayDeque starts = f.leastCommonAncestor(new HashSet(Arrays.asList(branchHeads))); + Assert.assertEquals(1, starts.size()); + ForkScanner.ParallelBlockStart pbs = starts.pop(); + Assert.assertEquals(exec.getNode("4"), pbs.forkStart); + Assert.assertEquals(3, pbs.unvisited.size()); + Assert.assertTrue(pbs.unvisited.contains(exec.getNode("7"))); + Assert.assertTrue(pbs.unvisited.contains(exec.getNode("8"))); + Assert.assertTrue(pbs.unvisited.contains(exec.getNode("9"))); } }