2 * Copyright (c) 2016 Cisco and/or its affiliates.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 package io.fd.honeycomb.translate.impl.write.registry;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static java.util.stream.Collectors.toMap;

import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Multimap;
import com.google.common.collect.Sets;
import io.fd.honeycomb.translate.TranslationException;
import io.fd.honeycomb.translate.util.RWUtils;
import io.fd.honeycomb.translate.write.DataObjectUpdate;
import io.fd.honeycomb.translate.write.DataValidationFailedException;
import io.fd.honeycomb.translate.write.WriteContext;
import io.fd.honeycomb.translate.write.Writer;
import io.fd.honeycomb.translate.write.registry.UpdateFailedException;
import io.fd.honeycomb.translate.write.registry.WriterRegistry;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.annotation.concurrent.ThreadSafe;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Flat writer registry, delegating updates to writers in the order writers were submitted.
 */
final class FlatWriterRegistry implements WriterRegistry {

    private static final Logger LOG = LoggerFactory.getLogger(FlatWriterRegistry.class);

    // Writer types in reverse registration order; iterated when processing deletes.
    private final Set<InstanceIdentifier<?>> writersOrderReversed;
    // Writer types in registration order; iterated when processing creates/updates.
    private final Set<InstanceIdentifier<?>> writersOrder;
    // Lookup from handled instance-identifier to the writer responsible for it.
    private final Map<InstanceIdentifier<?>, Writer<?>> writersById;
    // Distinct writer instances taken from writersById's values (deduplicated via a set).
    private final Set<? extends Writer<?>> writers;
67 * Create flat registry instance.
69 * @param writersById immutable, ordered map of writers to use to process updates. Order of the writers has to be one in
70 * which create and update operations should be handled. Deletes will be handled in reversed order.
71 * All deletes are handled before handling all the updates.
73 FlatWriterRegistry(@Nonnull final ImmutableMap<InstanceIdentifier<?>, Writer<?>> writersById) {
74 this.writersById = writersById;
75 this.writersOrderReversed = Sets.newLinkedHashSet(Lists.reverse(Lists.newArrayList(writersById.keySet())));
76 this.writersOrder = writersById.keySet();
77 this.writers = writersById.entrySet().stream().map(Map.Entry::getValue).collect(Collectors.toSet());
81 public void validateModifications(@Nonnull final DataObjectUpdates updates, @Nonnull final WriteContext ctx)
82 throws DataValidationFailedException {
83 // Optimization: validation order is not relevant, so do not merge deletes and updates.
84 validateModifications(updates.getDeletes(), ctx);
85 validateModifications(updates.getUpdates(), ctx);
88 private void validateModifications(
89 @Nonnull final Multimap<InstanceIdentifier<?>, ? extends DataObjectUpdate> updates,
90 @Nonnull final WriteContext ctx) throws DataValidationFailedException {
91 if (updates.isEmpty()) {
94 // Fail early if some handlers are missing.
95 checkAllTypesCanBeHandled(updates);
97 // Validators do not modify anything, so order of validators is not important.
98 // We iterate over writersOrder instead of modifications for consistent handling of subtree writers case.
99 for (InstanceIdentifier<?> writerType : writersOrder) {
100 final Writer<?> writer = getWriter(writerType);
101 for (DataObjectUpdate singleUpdate : getWritersData(updates, writer, writerType, ctx)) {
102 writer.validate(singleUpdate.getId(), singleUpdate.getDataBefore(), singleUpdate.getDataAfter(), ctx);
108 public void processModifications(@Nonnull final DataObjectUpdates updates,
109 @Nonnull final WriteContext ctx) throws TranslationException {
110 if (updates.isEmpty()) {
114 // ordered set of already processed nodes
115 final List<DataObjectUpdate> alreadyProcessed = new LinkedList<>();
117 // Optimization for single type updates, less consuming for pairing update with responsible writer,etc
118 if (updates.containsOnlySingleType()) {
119 // First process delete
120 singleUpdate(updates.getDeletes(), alreadyProcessed, ctx);
123 singleUpdate(updates.getUpdates(), alreadyProcessed, ctx);
125 // First process deletes
126 bulkUpdate(updates.getDeletes(), alreadyProcessed, ctx, writersOrderReversed);
129 bulkUpdate(updates.getUpdates(), alreadyProcessed, ctx, writersOrder);
132 LOG.debug("Update successful for types: {}", updates.getTypeIntersection());
133 LOG.trace("Update successful for: {}", updates);
137 public boolean writerSupportsUpdate(@Nonnull final InstanceIdentifier<?> type) {
138 Writer writer = getWriter(type);
140 if (writer == null) {
141 writer = getSubtreeWriterResponsible(type);
144 return checkNotNull(writer, "Unable to find writer for %s", type).supportsDirectUpdate();
147 private void singleUpdate(
148 @Nonnull final Multimap<InstanceIdentifier<?>, ? extends DataObjectUpdate> updates,
149 @Nonnull final List<DataObjectUpdate> alreadyProcessed,
150 @Nonnull final WriteContext ctx) throws UpdateFailedException {
151 if (updates.isEmpty()) {
155 DataObjectUpdate current = null;
156 final InstanceIdentifier<?> singleType = updates.keySet().iterator().next();
157 LOG.debug("Performing single type update for: {}", singleType);
158 Collection<? extends DataObjectUpdate> singleTypeUpdates = updates.get(singleType);
159 Writer<?> writer = getWriter(singleType);
161 if (writer == null) {
162 // This node must be handled by a subtree writer, find it and call it or else fail
163 writer = getSubtreeWriterResponsible(singleType);
164 checkArgument(writer != null, "Unable to process update. Missing writers for: %s",
166 singleTypeUpdates = getParentDataObjectUpdate(ctx, updates, writer);
170 LOG.trace("Performing single type update with writer: {}", writer);
172 for (DataObjectUpdate singleUpdate : singleTypeUpdates) {
173 current = singleUpdate;
174 writer.processModification(singleUpdate.getId(), singleUpdate.getDataBefore(),
175 singleUpdate.getDataAfter(),
177 alreadyProcessed.add(singleUpdate);
179 } catch (Exception e) {
180 throw new UpdateFailedException(e, alreadyProcessed, current);
185 private Writer<?> getSubtreeWriterResponsible(final InstanceIdentifier<?> singleType) {
186 return writersById.values().stream()
187 .filter(w -> w instanceof SubtreeWriter)
188 .filter(w -> w.canProcess(singleType))
193 static Collection<DataObjectUpdate> getParentDataObjectUpdate(final WriteContext ctx,
194 final Multimap<InstanceIdentifier<?>, ? extends DataObjectUpdate> updates,
195 final Writer<?> writer) {
196 // Now read data for subtree reader root, but first keyed ID is needed and that ID can be cut from updates
197 return ((SubtreeWriter<?>) writer).getHandledChildTypes().stream()
198 .filter(updates::containsKey)
199 .map(unkeyedId -> updates.get(unkeyedId))
200 .flatMap(doUpdates -> doUpdates.stream())
201 .map(DataObjectUpdate::getId)
202 .map(id -> getSingleParentDataObjectUpdate(ctx, (Multimap<InstanceIdentifier<?>, DataObjectUpdate>) updates, writer, id))
203 // Reduce the list of updates by putting them to a map. If a subtree writer for container gets 2 children updated, it will
204 // get only a single update, however if its a registered on a listand 2 different list items get their children updated,
205 // both updates should be preserved.
206 // Essentially, only group child updates in case the ID from root to writer is identical
207 .collect(toMap(update -> RWUtils.cutId(update.getId(), writer.getManagedDataObjectType()), Function.identity(), (u1, u2) -> u1))
211 private static DataObjectUpdate getSingleParentDataObjectUpdate(WriteContext ctx, Multimap<InstanceIdentifier<?>, DataObjectUpdate> updates, Writer<?> writer, InstanceIdentifier<?> firstAffectedChildId) {
212 final InstanceIdentifier<?> parentKeyedId =
213 RWUtils.cutId(firstAffectedChildId, writer.getManagedDataObjectType());
215 final Optional<? extends DataObject> parentBefore = ctx.readBefore(parentKeyedId);
216 final Optional<? extends DataObject> parentAfter = ctx.readAfter(parentKeyedId);
218 // Put the parent update data into updates map so that revert can also access the state
219 DataObjectUpdate parentUpdate = DataObjectUpdate.create(parentKeyedId, parentBefore.orNull(), parentAfter.orNull());
220 updates.put(RWUtils.makeIidWildcarded(parentKeyedId), parentUpdate);
224 private void bulkUpdate(
225 @Nonnull final Multimap<InstanceIdentifier<?>, ? extends DataObjectUpdate> updates,
226 @Nonnull final List<DataObjectUpdate> alreadyProcessed,
227 @Nonnull final WriteContext ctx,
228 @Nonnull final Set<InstanceIdentifier<?>> writersOrder) throws UpdateFailedException {
229 if (updates.isEmpty()) {
233 // Check that all updates can be handled
234 checkAllTypesCanBeHandled(updates);
236 LOG.debug("Performing bulk update for: {}", updates.keySet());
237 // Iterate over all writers and call update if there are any related updates
238 for (InstanceIdentifier<?> writerType : writersOrder) {
239 final Writer<?> writer = getWriter(writerType);
240 LOG.debug("Performing update for: {}", writerType);
241 LOG.trace("Performing update with writer: {}", writer);
243 for (DataObjectUpdate singleUpdate : getWritersData(updates, writer, writerType, ctx)) {
245 writer.processModification(singleUpdate.getId(), singleUpdate.getDataBefore(),
246 singleUpdate.getDataAfter(), ctx);
247 } catch (Exception e) {
248 throw new UpdateFailedException(e, alreadyProcessed, singleUpdate);
250 alreadyProcessed.add(singleUpdate);
251 LOG.trace("Update successful for type: {}", writerType);
252 LOG.debug("Update successful for: {}", singleUpdate);
257 private Collection<? extends DataObjectUpdate> getWritersData(
258 final Multimap<InstanceIdentifier<?>, ? extends DataObjectUpdate> updates, final Writer<?> writer,
259 final InstanceIdentifier<?> writerType, final WriteContext ctx) {
260 Collection<? extends DataObjectUpdate> writersData = updates.get(writerType);
262 if (writersData.isEmpty()) {
263 // If there are no data for current writer, but it is a SubtreeWriter and there are updates to
264 // its children, still invoke it with its root data.
265 // Notice that child updates will be ignored if the set of all modifications
266 // contain both parent update and child updates. But this is ok, since all changes are already expressed in
267 // the parent update.
268 if (writer instanceof SubtreeWriter<?> && isAffected((SubtreeWriter<?>) writer, updates)) {
269 // Provide parent data for SubtreeWriter for further processing
270 writersData = getParentDataObjectUpdate(ctx, updates, writer);
272 // Skipping unaffected writer
273 // Alternative to this would be modification sort according to the order of writers
274 return Collections.emptyList();
280 private void checkAllTypesCanBeHandled(
281 @Nonnull final Multimap<InstanceIdentifier<?>, ? extends DataObjectUpdate> updates) {
283 List<InstanceIdentifier<?>> noWriterNodes = new ArrayList<>();
284 for (InstanceIdentifier<?> id : updates.keySet()) {
285 // either there is direct writer for the iid
287 if (writersById.containsKey(id) || writers.stream().anyMatch(o -> o.canProcess(id))) {
290 noWriterNodes.add(id);
293 if (!noWriterNodes.isEmpty()) {
294 throw new IllegalArgumentException("Unable to process update. Missing writers for: " + noWriterNodes);
299 * Check whether {@link SubtreeWriter} is affected by the updates.
301 * @return true if there are any updates to SubtreeWriter's child nodes (those marked by SubtreeWriter as being
304 private static boolean isAffected(final SubtreeWriter<?> writer,
305 final Multimap<InstanceIdentifier<?>, ? extends DataObjectUpdate> updates) {
306 return !Sets.intersection(writer.getHandledChildTypes(), updates.keySet()).isEmpty();
310 private Writer<?> getWriter(@Nonnull final InstanceIdentifier<?> singleType) {
311 return writersById.get(singleType);