Skip to content

Commit

Permalink
Merge pull request #32994 from werdmann/improve-clustering-speed
Browse files Browse the repository at this point in the history
Improve the execution speed of the DA clusterizers for primary vertex finding
  • Loading branch information
cmsbuild authored Mar 6, 2021
2 parents 7671deb + 883cad1 commit 33735d5
Show file tree
Hide file tree
Showing 7 changed files with 203 additions and 173 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -3746,6 +3746,7 @@ void PrimaryVertexValidation::fillDescriptions(edm::ConfigurationDescriptions& d
psd1.add<double>("coolingFactor", 0.6);
psd1.add<double>("vertexSize", 0.006);
psd1.add<double>("uniquetrkweight", 0.8);
psd1.add<double>("uniquetrkminp", 0.0);
psd1.add<double>("zrange", 4.0);
psd1.add<double>("tmerge", 0.01); // 4D only
psd1.add<double>("dtCutOff", 4.); // 4D only
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,8 @@ class DAClusterizerInZT_vect final : public TrackClusterizerInZ {
std::vector<unsigned int> kmax; // 1 + index of the last cluster within zrange
std::vector<const reco::TransientTrack *> tt; // a pointer to the Transient Track

double osumtkwt; // 1. / (sum of all track weights)

void addItem(double new_zpca,
double new_tpca,
double new_dz2,
Expand Down Expand Up @@ -302,6 +304,7 @@ class DAClusterizerInZT_vect final : public TrackClusterizerInZ {

double mintrkweight_;
double uniquetrkweight_;
double uniquetrkminp_;
double zmerge_;
double tmerge_;
double betapurge_;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,8 @@ class DAClusterizerInZ_vect final : public TrackClusterizerInZ {
std::vector<unsigned int> kmax; // 1 + index of the last cluster within zrange
std::vector<const reco::TransientTrack *> tt; // a pointer to the Transient Track

double osumtkwt; // 1. / (sum of all track weights)

void addItemSorted(double new_zpca, double new_dz2, const reco::TransientTrack *new_tt, double new_tkwt) {
// sort tracks with decreasing resolution (note that dz2 = 1/sigma^2)
unsigned int i = 0;
Expand Down Expand Up @@ -206,6 +208,7 @@ class DAClusterizerInZ_vect final : public TrackClusterizerInZ {

double mintrkweight_;
double uniquetrkweight_;
double uniquetrkminp_;
double zmerge_;
double betapurge_;

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,8 +32,7 @@ PrimaryVertexProducer::PrimaryVertexProducer(const edm::ParameterSet& conf) : th
} else if (trackSelectionAlgorithm == "filterWithThreshold") {
theTrackFilter = new HITrackFilterForPVFinding(conf.getParameter<edm::ParameterSet>("TkFilterParameters"));
} else {
throw VertexException("PrimaryVertexProducerAlgorithm: unknown track selection algorithm: " +
trackSelectionAlgorithm);
throw VertexException("PrimaryVertexProducer: unknown track selection algorithm: " + trackSelectionAlgorithm);
}

// select and configure the track clusterizer
Expand All @@ -57,7 +56,7 @@ PrimaryVertexProducer::PrimaryVertexProducer(const edm::ParameterSet& conf) : th
}

else {
throw VertexException("PrimaryVertexProducerAlgorithm: unknown clustering algorithm: " + clusteringAlgorithm);
throw VertexException("PrimaryVertexProducer: unknown clustering algorithm: " + clusteringAlgorithm);
}

if (f4D) {
Expand All @@ -80,7 +79,7 @@ PrimaryVertexProducer::PrimaryVertexProducer(const edm::ParameterSet& conf) : th
} else if (fitterAlgorithm == "AdaptiveVertexFitter") {
algorithm.fitter = new AdaptiveVertexFitter(GeometricAnnealing(algoconf->getParameter<double>("chi2cutoff")));
} else {
throw VertexException("PrimaryVertexProducerAlgorithm: unknown algorithm: " + fitterAlgorithm);
throw VertexException("PrimaryVertexProducer: unknown algorithm: " + fitterAlgorithm);
}
algorithm.label = algoconf->getParameter<std::string>("label");
algorithm.minNdof = algoconf->getParameter<double>("minNdof");
Expand Down Expand Up @@ -120,10 +119,10 @@ PrimaryVertexProducer::PrimaryVertexProducer(const edm::ParameterSet& conf) : th
fRecoveryIteration = conf.getParameter<bool>("isRecoveryIteration");
if (fRecoveryIteration) {
if (algorithms.empty()) {
throw VertexException("PrimaryVertexProducerAlgorithm: No algorithm specified. ");
throw VertexException("PrimaryVertexProducer: No algorithm specified. ");
} else if (algorithms.size() > 1) {
throw VertexException(
"PrimaryVertexProducerAlgorithm: Running in Recovery mode and more than one algorithm specified. Please "
"PrimaryVertexProducer: Running in Recovery mode and more than one algorithm specified. Please "
"only one algorithm.");
}
recoveryVtxToken = consumes<reco::VertexCollection>(conf.getParameter<edm::InputTag>("recoveryVtxCollection"));
Expand All @@ -144,7 +143,7 @@ PrimaryVertexProducer::~PrimaryVertexProducer() {
}

void PrimaryVertexProducer::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) {
// get the BeamSpot, it will alwys be needed, even when not used as a constraint
// get the BeamSpot, it will always be needed, even when not used as a constraint
reco::BeamSpot beamSpot;
edm::Handle<reco::BeamSpot> recoBeamSpotHandle;
iEvent.getByToken(bsToken, recoBeamSpotHandle);
Expand Down Expand Up @@ -251,8 +250,10 @@ void PrimaryVertexProducer::produce(edm::Event& iEvent, const edm::EventSetup& i
if (f4D) {
if (v.isValid()) {
auto err = v.positionError().matrix4D();
auto trkweightMap3d = v.weightMap(); // copy the 3 fit weights
err(3, 3) = vartime;
v = TransientVertex(v.position(), meantime, err, v.originalTracks(), v.totalChiSquared());
v.weightMap(trkweightMap3d);
}
}

Expand Down Expand Up @@ -427,6 +428,7 @@ void PrimaryVertexProducer::fillDescriptions(edm::ConfigurationDescriptions& des
psd1.add<double>("coolingFactor", 0.6);
psd1.add<double>("vertexSize", 0.006);
psd1.add<double>("uniquetrkweight", 0.8);
psd1.add<double>("uniquetrkminp", 0.0);
psd1.add<double>("zrange", 4.0);

psd1.add<double>("tmerge", 0.01); // 4D only
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,8 @@
d0CutOff = cms.double(3.), # downweight high IP tracks
dzCutOff = cms.double(3.), # outlier rejection after freeze-out (T<Tmin)
zmerge = cms.double(1e-2), # merge intermediate clusters separated by less than zmerge
uniquetrkweight = cms.double(0.8) # require at least two tracks with this weight at T=Tpurge
uniquetrkweight = cms.double(0.8),# require at least two tracks with this weight at T=Tpurge
uniquetrkminp = cms.double(0.0) # minimal a priori track weight for counting unique tracks
)
)

Expand Down Expand Up @@ -53,6 +54,7 @@
t0Max = cms.double(1.0), # outlier rejection for use of timing information
zmerge = cms.double(1e-2), # merge intermediate clusters separated by less than zmerge and tmerge
tmerge = cms.double(1e-1), # merge intermediate clusters separated by less than zmerge and tmerge
uniquetrkweight = cms.double(0.8) # require at least two tracks with this weight at T=Tpurge
uniquetrkweight = cms.double(0.8),# require at least two tracks with this weight at T=Tpurge
uniquetrkminp = cms.double(0.0) # minimal a priori track weight for counting unique tracks
)
)
Loading

0 comments on commit 33735d5

Please sign in to comment.