Line data Source code
1 : /*
2 : *
3 : * Copyright (c) 2021 Project CHIP Authors
4 : * All rights reserved.
5 : *
6 : * Licensed under the Apache License, Version 2.0 (the "License");
7 : * you may not use this file except in compliance with the License.
8 : * You may obtain a copy of the License at
9 : *
10 : * http://www.apache.org/licenses/LICENSE-2.0
11 : *
12 : * Unless required by applicable law or agreed to in writing, software
13 : * distributed under the License is distributed on an "AS IS" BASIS,
14 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 : * See the License for the specific language governing permissions and
16 : * limitations under the License.
17 : */
18 :
19 : #include <access/AccessRestrictionProvider.h>
20 : #include <access/Privilege.h>
21 : #include <app/AppConfig.h>
22 : #include <app/AttributePathExpandIterator.h>
23 : #include <app/ConcreteEventPath.h>
24 : #include <app/GlobalAttributes.h>
25 : #include <app/InteractionModelEngine.h>
26 : #include <app/MessageDef/StatusIB.h>
27 : #include <app/data-model-provider/ActionReturnStatus.h>
28 : #include <app/data-model-provider/MetadataLookup.h>
29 : #include <app/data-model-provider/MetadataTypes.h>
30 : #include <app/data-model-provider/Provider.h>
31 : #include <app/icd/server/ICDServerConfig.h>
32 : #include <app/reporting/Engine.h>
33 : #include <app/reporting/reporting.h>
34 : #include <app/util/MatterCallbacks.h>
35 : #include <lib/core/CHIPError.h>
36 : #include <lib/core/DataModelTypes.h>
37 : #include <lib/support/CodeUtils.h>
38 : #include <protocols/interaction_model/StatusCode.h>
39 :
40 : #include <optional>
41 :
42 : #if CHIP_CONFIG_ENABLE_ICD_SERVER
43 : #include <app/icd/server/ICDNotifier.h> // nogncheck
44 : #endif
45 :
46 : using namespace chip::Access;
47 :
48 : namespace chip {
49 : namespace app {
50 : namespace reporting {
51 : namespace {
52 :
53 : using DataModel::ReadFlags;
54 : using Protocols::InteractionModel::Status;
55 :
56 : /// Returns the status of ACL validation.
57 : /// If the return value has a status set, that means the ACL check failed,
58 : /// the read must not be performed, and the returned status (which may
59 : /// be success, when dealing with non-concrete paths) should be used
60 : /// as the status for the read.
61 : ///
62 : /// If the returned value is std::nullopt, that means the ACL check passed and the
63 : /// read should proceed.
64 9848 : std::optional<CHIP_ERROR> ValidateReadAttributeACL(const SubjectDescriptor & subjectDescriptor,
65 : const ConcreteReadAttributePath & path, Privilege requiredPrivilege)
66 : {
67 :
68 9848 : RequestPath requestPath{ .cluster = path.mClusterId,
69 9848 : .endpoint = path.mEndpointId,
70 : .requestType = RequestType::kAttributeReadRequest,
71 9848 : .entityId = path.mAttributeId };
72 :
73 9848 : CHIP_ERROR err = GetAccessControl().Check(subjectDescriptor, requestPath, requiredPrivilege);
74 9848 : if (err == CHIP_NO_ERROR)
75 : {
76 9847 : return std::nullopt;
77 : }
78 1 : VerifyOrReturnError((err == CHIP_ERROR_ACCESS_DENIED) || (err == CHIP_ERROR_ACCESS_RESTRICTED_BY_ARL), err);
79 :
80 : // Implementation of 8.4.3.2 of the spec for path expansion
81 1 : if (path.mExpanded)
82 : {
83 0 : return CHIP_NO_ERROR;
84 : }
85 :
86 : // access denied and access restricted have specific codes for IM
87 1 : return err == CHIP_ERROR_ACCESS_DENIED ? CHIP_IM_GLOBAL_STATUS(UnsupportedAccess) : CHIP_IM_GLOBAL_STATUS(AccessRestricted);
88 : }
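// Editorial note (illustrative sketch, not part of the instrumented source): callers are expected to
// treat the returned optional as "a status that overrides the read" when it has a value, and to
// proceed with the read only when it is std::nullopt. The names below mirror the actual usage in
// RetrieveClusterData() further down in this file:
//
//   if (auto aclStatus = ValidateReadAttributeACL(subjectDescriptor, path, Privilege::kView); aclStatus.has_value())
//   {
//       // ACL check failed (or, for an expanded path, was downgraded to success):
//       // use *aclStatus as the result of the read and do not invoke the provider.
//   }
//   else
//   {
//       // ACL check passed: continue with readability checks and the actual read.
//   }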
89 :
90 : /// Checks that the given attribute path corresponds to a readable attribute. If not, it
91 : /// will return the corresponding failure status.
92 4924 : std::optional<Status> ValidateAttributeIsReadable(DataModel::Provider * dataModel, const ConcreteReadAttributePath & path,
93 : const std::optional<DataModel::AttributeEntry> & entry)
94 : {
95 4924 : if (!entry.has_value())
96 : {
97 1 : return DataModel::ValidateClusterPath(dataModel, path, Status::UnsupportedAttribute);
98 : }
99 :
100 4923 : if (!entry->GetReadPrivilege().has_value())
101 : {
102 0 : return Status::UnsupportedRead;
103 : }
104 :
105 4923 : return std::nullopt;
106 : }
107 :
108 4925 : DataModel::ActionReturnStatus RetrieveClusterData(DataModel::Provider * dataModel, const SubjectDescriptor & subjectDescriptor,
109 : BitFlags<ReadFlags> flags, AttributeReportIBs::Builder & reportBuilder,
110 : const ConcreteReadAttributePath & path, AttributeEncodeState * encoderState)
111 : {
112 4925 : ChipLogDetail(DataManagement, "<RE:Run> Cluster %" PRIx32 ", Attribute %" PRIx32 " is dirty", path.mClusterId,
113 : path.mAttributeId);
114 4925 : DataModelCallbacks::GetInstance()->AttributeOperation(DataModelCallbacks::OperationType::Read,
115 : DataModelCallbacks::OperationOrder::Pre, path);
116 :
117 4925 : DataModel::ReadAttributeRequest readRequest;
118 :
119 4925 : readRequest.readFlags = flags;
120 4925 : readRequest.subjectDescriptor = &subjectDescriptor;
121 4925 : readRequest.path = path;
122 :
123 4925 : DataModel::ServerClusterFinder serverClusterFinder(dataModel);
124 :
125 4925 : DataVersion version = 0;
126 4925 : if (auto clusterInfo = serverClusterFinder.Find(path); clusterInfo.has_value())
127 : {
128 4924 : version = clusterInfo->dataVersion;
129 : }
130 : else
131 : {
132 1 : ChipLogError(DataManagement, "Read request on unknown cluster - no data version available");
133 : }
134 :
135 4925 : TLV::TLVWriter checkpoint;
136 4925 : reportBuilder.Checkpoint(checkpoint);
137 :
138 4925 : DataModel::ActionReturnStatus status(CHIP_NO_ERROR);
139 4925 : bool isFabricFiltered = flags.Has(ReadFlags::kFabricFiltered);
140 4925 : AttributeValueEncoder attributeValueEncoder(reportBuilder, subjectDescriptor, path, version, isFabricFiltered, encoderState);
141 :
142 : // TODO: we explicitly DO NOT validate that the path is a valid cluster path (moreover, serverClusterFinder above
143 : // explicitly ignores that case).
144 : // Validation of attribute existence is done after the ACL check, in `ValidateAttributeIsReadable` below.
145 : //
146 : // See https://github.com/project-chip/connectedhomeip/issues/37410
147 :
148 : // Execute the ACL Access Granting Algorithm before existence checks, assuming the required_privilege for the element is
149 : // View, to determine if the subject would have had at least some access against the concrete path. This is done so we don't
150 : // leak information if we do fail existence checks.
151 :
152 4925 : DataModel::AttributeFinder finder(dataModel);
153 4925 : std::optional<DataModel::AttributeEntry> entry = finder.Find(path);
154 :
155 4925 : if (auto access_status = ValidateReadAttributeACL(subjectDescriptor, path, Privilege::kView); access_status.has_value())
156 : {
157 1 : status = *access_status;
158 : }
159 4924 : else if (auto readable_status = ValidateAttributeIsReadable(dataModel, path, entry); readable_status.has_value())
160 : {
161 1 : status = *readable_status;
162 : }
163 : // Execute the ACL Access Granting Algorithm against the concrete path a second time, using the actual required_privilege.
164 : // entry->GetReadPrivilege() is guaranteed to have a value, since that condition is checked in the previous condition (inside
165 : // ValidateAttributeIsReadable()).
166 : // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
167 9846 : else if (auto required_privilege_status = ValidateReadAttributeACL(subjectDescriptor, path, entry->GetReadPrivilege().value());
168 4923 : required_privilege_status.has_value())
169 : {
170 0 : status = *required_privilege_status;
171 : }
172 4923 : else if (IsSupportedGlobalAttributeNotInMetadata(readRequest.path.mAttributeId))
173 : {
174 : // Global attributes are NOT directly handled by data model providers, instead
175 : // they are routed through metadata.
176 1402 : status = ReadGlobalAttributeFromMetadata(dataModel, readRequest.path, attributeValueEncoder);
177 : }
178 : else
179 : {
180 3521 : status = dataModel->ReadAttribute(readRequest, attributeValueEncoder);
181 : }
182 :
183 4925 : if (status.IsSuccess())
184 : {
185 : // TODO: this callback being only executed on success is awkward. The Write callback is always done
186 : // for both read and write.
187 : //
188 : // For now this preserves existing/previous code logic; however, we should consider ALWAYS
189 : // calling this.
190 4527 : DataModelCallbacks::GetInstance()->AttributeOperation(DataModelCallbacks::OperationType::Read,
191 : DataModelCallbacks::OperationOrder::Post, path);
192 4527 : return status;
193 : }
194 :
195 : // Encoder state is relevant for errors in case they are retryable.
196 : //
197 : // Generally only out-of-space encoding errors would be retryable; however, we save the state
198 : // for all errors in case this information is useful (retry or error position).
199 398 : if (encoderState != nullptr)
200 : {
201 398 : *encoderState = attributeValueEncoder.GetState();
202 : }
203 :
204 : #if CHIP_CONFIG_DATA_MODEL_EXTRA_LOGGING
205 : // Out-of-space errors may just indicate chunked data; reporting those cases would be very confusing
206 : // as they are not true errors. Report only the others (which presumably are not recoverable
207 : // and will be sent to the client as well).
208 398 : if (!status.IsOutOfSpaceEncodingResponse())
209 : {
210 2 : DataModel::ActionReturnStatus::StringStorage storage;
211 2 : ChipLogError(DataManagement, "Failed to read attribute: %s", status.c_str(storage));
212 : }
213 : #endif
214 398 : return status;
215 4925 : }
216 :
217 109 : bool IsClusterDataVersionEqualTo(DataModel::Provider * dataModel, const ConcreteClusterPath & path, DataVersion dataVersion)
218 : {
219 109 : DataModel::ServerClusterFinder serverClusterFinder(dataModel);
220 109 : auto info = serverClusterFinder.Find(path);
221 :
222 109 : return info.has_value() && (info->dataVersion == dataVersion);
223 109 : }
224 :
225 : /// Check if the given `err` is a known ACL error that can be translated into
226 : /// a StatusIB (UnsupportedAccess/AccessRestricted)
227 : ///
228 : /// Returns true if the error could be translated and places the result into `outStatus`.
229 : /// `path` is used for logging.
230 113 : bool IsTranslatableAclError(const ConcreteEventPath & path, const CHIP_ERROR & err, StatusIB & outStatus)
231 : {
232 113 : if ((err != CHIP_ERROR_ACCESS_DENIED) && (err != CHIP_ERROR_ACCESS_RESTRICTED_BY_ARL))
233 : {
234 111 : return false;
235 : }
236 :
237 2 : ChipLogDetail(InteractionModel, "Access to event (%u, " ChipLogFormatMEI ", " ChipLogFormatMEI ") denied by %s",
238 : path.mEndpointId, ChipLogValueMEI(path.mClusterId), ChipLogValueMEI(path.mEventId),
239 : err == CHIP_ERROR_ACCESS_DENIED ? "ACL" : "ARL");
240 :
241 2 : outStatus = err == CHIP_ERROR_ACCESS_DENIED ? StatusIB(Status::UnsupportedAccess) : StatusIB(Status::AccessRestricted);
242 2 : return true;
243 : }
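// Editorial note: the translation performed by IsTranslatableAclError() above maps
//   CHIP_ERROR_ACCESS_DENIED            -> Status::UnsupportedAccess
//   CHIP_ERROR_ACCESS_RESTRICTED_BY_ARL -> Status::AccessRestricted
// Any other error is left untranslated and the function returns false.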
244 :
245 58 : CHIP_ERROR CheckEventValidity(const ConcreteEventPath & path, const SubjectDescriptor & subjectDescriptor,
246 : DataModel::Provider * provider, StatusIB & outStatus)
247 : {
248 : // We validate the ACL before the path; however, this means we do not want the real ACL check
249 : // to be blocked by an `Invalid endpoint id` error when checking event info.
250 : // As a result, we check for VIEW privilege on the cluster first (most permissive)
251 : // and perform a second check for the actual required privilege as a follow-up.
252 58 : RequestPath requestPath{
253 58 : .cluster = path.mClusterId,
254 58 : .endpoint = path.mEndpointId,
255 : .requestType = RequestType::kEventReadRequest,
256 58 : .entityId = path.mEventId,
257 58 : };
258 58 : CHIP_ERROR err = GetAccessControl().Check(subjectDescriptor, requestPath, Access::Privilege::kView);
259 58 : if (IsTranslatableAclError(path, err, outStatus))
260 : {
261 2 : return CHIP_NO_ERROR;
262 : }
263 56 : ReturnErrorOnFailure(err);
264 :
265 : DataModel::EventEntry eventInfo;
266 56 : err = provider->EventInfo(path, eventInfo);
267 56 : if (err != CHIP_NO_ERROR)
268 : {
269 : // Cannot get event metadata to validate, so the event is not supported.
270 : // We still fall through into "ValidateClusterPath" to try to return a better code
271 : // (i.e. invalid endpoint or cluster); however, if the path seems OK we will
272 : // return UnsupportedEvent since we failed to get event metadata.
273 1 : outStatus = StatusIB(DataModel::ValidateClusterPath(provider, path, Status::UnsupportedEvent));
274 1 : return CHIP_NO_ERROR;
275 : }
276 :
277 : // Although EventInfo() was successful, we still need to validate the cluster path, since providers MAY return CHIP_NO_ERROR
278 : // even when the event is unknown.
279 55 : Status status = DataModel::ValidateClusterPath(provider, path, Status::Success);
280 55 : if (status != Status::Success)
281 : {
282 : // a valid status available: failure
283 0 : outStatus = StatusIB(status);
284 0 : return CHIP_NO_ERROR;
285 : }
286 :
287 : // Per spec, the required-privilege ACL check is performed only after path existence is validated
288 55 : err = GetAccessControl().Check(subjectDescriptor, requestPath, eventInfo.readPrivilege);
289 55 : if (IsTranslatableAclError(path, err, outStatus))
290 : {
291 0 : return CHIP_NO_ERROR;
292 : }
293 55 : ReturnErrorOnFailure(err);
294 :
295 : // Set the status to Success since all of the above checks passed.
296 55 : outStatus = StatusIB(Status::Success);
297 :
298 : // Status was set to Success above.
299 55 : return CHIP_NO_ERROR;
300 : }
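// Editorial summary (illustrative, not part of the instrumented source): CheckEventValidity() above
// intentionally orders its checks so that existence information is not leaked to subjects without access:
//
//   1. ACL check against Privilege::kView        -> UnsupportedAccess / AccessRestricted on failure
//   2. provider->EventInfo(path, ...)            -> UnsupportedEvent (or a more specific path error) on failure
//   3. DataModel::ValidateClusterPath(...)       -> endpoint/cluster path error on failure
//   4. ACL check against eventInfo.readPrivilege -> UnsupportedAccess / AccessRestricted on failure
//   5. otherwise                                 -> Success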
301 :
302 : } // namespace
303 :
304 82 : Engine::Engine(InteractionModelEngine * apImEngine) : mpImEngine(apImEngine) {}
305 :
306 438 : CHIP_ERROR Engine::Init(EventManagement * apEventManagement)
307 : {
308 438 : VerifyOrReturnError(apEventManagement != nullptr, CHIP_ERROR_INVALID_ARGUMENT);
309 438 : mNumReportsInFlight = 0;
310 438 : mCurReadHandlerIdx = 0;
311 438 : mpEventManagement = apEventManagement;
312 :
313 438 : return CHIP_NO_ERROR;
314 : }
315 :
316 312 : void Engine::Shutdown()
317 : {
318 : // Flush out the event buffer synchronously
319 312 : ScheduleUrgentEventDeliverySync();
320 :
321 312 : mNumReportsInFlight = 0;
322 312 : mCurReadHandlerIdx = 0;
323 312 : mGlobalDirtySet.ReleaseAll();
324 312 : }
325 :
326 4722 : bool Engine::IsClusterDataVersionMatch(const SingleLinkedListNode<DataVersionFilter> * aDataVersionFilterList,
327 : const ConcreteReadAttributePath & aPath)
328 : {
329 4722 : bool existPathMatch = false;
330 4722 : bool existVersionMismatch = false;
331 43484 : for (auto filter = aDataVersionFilterList; filter != nullptr; filter = filter->mpNext)
332 : {
333 38762 : if (aPath.mEndpointId == filter->mValue.mEndpointId && aPath.mClusterId == filter->mValue.mClusterId)
334 : {
335 109 : existPathMatch = true;
336 :
337 109 : if (!IsClusterDataVersionEqualTo(mpImEngine->GetDataModelProvider(),
338 218 : ConcreteClusterPath(filter->mValue.mEndpointId, filter->mValue.mClusterId),
339 109 : filter->mValue.mDataVersion.Value()))
340 : {
341 79 : existVersionMismatch = true;
342 : }
343 : }
344 : }
345 4722 : return existPathMatch && !existVersionMismatch;
346 : }
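// Editorial note (illustrative example): a path "matches" in IsClusterDataVersionMatch() above only if
// at least one filter targets its endpoint/cluster AND none of those filters carries a stale data
// version. For example, with a single filter { endpoint 1, cluster X, version 7 }:
//
//   - provider reports data version 7 -> match, the cluster is skipped during the priming report
//   - provider reports data version 8 -> mismatch, the cluster's attributes are reported again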
347 :
348 2501 : static bool IsOutOfWriterSpaceError(CHIP_ERROR err)
349 : {
350 2501 : return err == CHIP_ERROR_NO_MEMORY || err == CHIP_ERROR_BUFFER_TOO_SMALL;
351 : }
352 :
353 1986 : CHIP_ERROR Engine::BuildSingleReportDataAttributeReportIBs(ReportDataMessage::Builder & aReportDataBuilder,
354 : ReadHandler * apReadHandler, bool * apHasMoreChunks,
355 : bool * apHasEncodedData)
356 : {
357 1986 : CHIP_ERROR err = CHIP_NO_ERROR;
358 1986 : bool attributeDataWritten = false;
359 1986 : bool hasMoreChunks = true;
360 1986 : TLV::TLVWriter backup;
361 1986 : const uint32_t kReservedSizeEndOfReportIBs = 1;
362 1986 : bool reservedEndOfReportIBs = false;
363 :
364 1986 : aReportDataBuilder.Checkpoint(backup);
365 :
366 1986 : AttributeReportIBs::Builder & attributeReportIBs = aReportDataBuilder.CreateAttributeReportIBs();
367 1986 : size_t emptyReportDataLength = 0;
368 :
369 1986 : SuccessOrExit(err = aReportDataBuilder.GetError());
370 :
371 1986 : emptyReportDataLength = attributeReportIBs.GetWriter()->GetLengthWritten();
372 : //
373 : // Reserve enough space for closing out the Report IB list
374 : //
375 1986 : SuccessOrExit(err = attributeReportIBs.GetWriter()->ReserveBuffer(kReservedSizeEndOfReportIBs));
376 1986 : reservedEndOfReportIBs = true;
377 :
378 : {
379 : // TODO: Figure out how AttributePathExpandIterator should handle read
380 : // vs write paths.
381 1986 : ConcreteAttributePath readPath;
382 :
383 1986 : ChipLogDetail(DataManagement,
384 : "Building Reports for ReadHandler with LastReportGeneration = 0x" ChipLogFormatX64
385 : " DirtyGeneration = 0x" ChipLogFormatX64,
386 : ChipLogValueX64(apReadHandler->mPreviousReportsBeginGeneration),
387 : ChipLogValueX64(apReadHandler->mDirtyGeneration));
388 :
389 : // This ReadHandler is not generating reports, so we reset the iterator for a clean start.
390 1986 : if (!apReadHandler->IsReporting())
391 : {
392 1174 : apReadHandler->ResetPathIterator();
393 : }
394 :
395 : #if CONFIG_BUILD_FOR_HOST_UNIT_TEST
396 1986 : uint32_t attributesRead = 0;
397 : #endif
398 :
399 : // For each path included in the interested path of the read handler...
400 1986 : for (RollbackAttributePathExpandIterator iterator(mpImEngine->GetDataModelProvider(),
401 1986 : apReadHandler->AttributeIterationPosition());
402 6970 : iterator.Next(readPath); iterator.MarkCompleted())
403 : {
404 5400 : if (!apReadHandler->IsPriming())
405 : {
406 678 : bool concretePathDirty = false;
407 : // TODO: Optimize this implementation by making the iterator only emit intersected paths.
408 678 : mGlobalDirtySet.ForEachActiveObject([&](auto * dirtyPath) {
409 815 : if (dirtyPath->IsAttributePathSupersetOf(readPath))
410 : {
411 : // We don't need to worry about paths that were already marked dirty before the last time this read handler
412 : // started a report that it completed: those paths already got reported.
413 252 : if (dirtyPath->mGeneration > apReadHandler->mPreviousReportsBeginGeneration)
414 : {
415 249 : concretePathDirty = true;
416 249 : return Loop::Break;
417 : }
418 : }
419 566 : return Loop::Continue;
420 : });
421 :
422 678 : if (!concretePathDirty)
423 : {
424 : // This attribute is not dirty, we just skip this one.
425 429 : continue;
426 : }
427 : }
428 : else
429 : {
430 4722 : if (IsClusterDataVersionMatch(apReadHandler->GetDataVersionFilterList(), readPath))
431 : {
432 26 : continue;
433 : }
434 : }
435 :
436 : #if CONFIG_BUILD_FOR_HOST_UNIT_TEST
437 4945 : attributesRead++;
438 4945 : if (attributesRead > mMaxAttributesPerChunk)
439 : {
440 416 : ExitNow(err = CHIP_ERROR_BUFFER_TOO_SMALL);
441 : }
442 : #endif
443 :
444 : // If we are processing a read request, or the initial report of a subscription, just regard all paths as dirty
445 : // paths.
446 4925 : TLV::TLVWriter attributeBackup;
447 4925 : attributeReportIBs.Checkpoint(attributeBackup);
448 4925 : ConcreteReadAttributePath pathForRetrieval(readPath);
449 : // Load the saved state from previous encoding session for chunking of one single attribute (list chunking).
450 4925 : AttributeEncodeState encodeState = apReadHandler->GetAttributeEncodeState();
451 4925 : BitFlags<ReadFlags> flags;
452 4925 : flags.Set(ReadFlags::kFabricFiltered, apReadHandler->IsFabricFiltered());
453 4925 : flags.Set(ReadFlags::kAllowsLargePayload, apReadHandler->AllowsLargePayload());
454 : DataModel::ActionReturnStatus status =
455 4925 : RetrieveClusterData(mpImEngine->GetDataModelProvider(), apReadHandler->GetSubjectDescriptor(), flags,
456 : attributeReportIBs, pathForRetrieval, &encodeState);
457 4925 : if (status.IsError())
458 : {
459 : // Operation error set, since this will affect early return or override on status encoding
460 : // it will also be used for error reporting below.
461 398 : err = status.GetUnderlyingError();
462 :
463 : // If the error is not an "out of writer space" error, roll back and encode a status.
464 : // Otherwise, if partial data is allowed, save the encode state.
465 : // Otherwise roll back. If we have already encoded some chunks, we are done; otherwise encode a status.
466 :
467 398 : if (encodeState.AllowPartialData() && status.IsOutOfSpaceEncodingResponse())
468 : {
469 255 : ChipLogDetail(DataManagement,
470 : "List does not fit in packet, chunk between list items for clusterId: " ChipLogFormatMEI
471 : ", attributeId: " ChipLogFormatMEI,
472 : ChipLogValueMEI(pathForRetrieval.mClusterId), ChipLogValueMEI(pathForRetrieval.mAttributeId));
473 : // Encoding is aborted but partial data is allowed, then we don't rollback and save the state for next chunk.
474 : // The expectation is that RetrieveClusterData has already reset attributeReportIBs to a good state (rolled
475 : // back any partially-written AttributeReportIB instances, reset its error status). Since AllowPartialData()
476 : // is true, we may not have encoded a complete attribute value, but we did, if we encoded anything, encode a
477 : // set of complete AttributeReportIB instances that represent part of the attribute value.
478 255 : apReadHandler->SetAttributeEncodeState(encodeState);
479 : }
480 : else
481 : {
482 : // We hit an error while writing reports; one common case is running out of buffer space. Roll back the
483 : // attributeReportIB to avoid any partial data.
484 143 : attributeReportIBs.Rollback(attributeBackup);
485 143 : apReadHandler->SetAttributeEncodeState(AttributeEncodeState());
486 :
487 143 : if (!status.IsOutOfSpaceEncodingResponse())
488 : {
489 2 : ChipLogError(DataManagement,
490 : "Fail to retrieve data, roll back and encode status on clusterId: " ChipLogFormatMEI
491 : ", attributeId: " ChipLogFormatMEI "err = %" CHIP_ERROR_FORMAT,
492 : ChipLogValueMEI(pathForRetrieval.mClusterId), ChipLogValueMEI(pathForRetrieval.mAttributeId),
493 : err.Format());
494 : // Try to encode our error as a status response.
495 2 : err = attributeReportIBs.EncodeAttributeStatus(pathForRetrieval, StatusIB(status.GetStatusCode()));
496 2 : if (err != CHIP_NO_ERROR)
497 : {
498 : // OK, just roll back again and give up; if we still ran out of space we
499 : // will send this status response in the next chunk.
500 0 : attributeReportIBs.Rollback(attributeBackup);
501 : }
502 : }
503 : else
504 : {
505 141 : ChipLogDetail(DataManagement,
506 : "Next attribute value does not fit in packet, roll back on clusterId: " ChipLogFormatMEI
507 : ", attributeId: " ChipLogFormatMEI ", err = %" CHIP_ERROR_FORMAT,
508 : ChipLogValueMEI(pathForRetrieval.mClusterId), ChipLogValueMEI(pathForRetrieval.mAttributeId),
509 : err.Format());
510 : }
511 : }
512 : }
513 4925 : SuccessOrExit(err);
514 : // Successfully encoded the attribute, clear the internal state.
515 4529 : apReadHandler->SetAttributeEncodeState(AttributeEncodeState());
516 1986 : }
517 :
518 : // We just visited all paths this read handler is interested in and did not abort in the middle of iteration, so there are
519 : // no more chunks for this report.
520 1570 : hasMoreChunks = false;
521 : }
522 1986 : exit:
523 1986 : if (attributeReportIBs.GetWriter()->GetLengthWritten() != emptyReportDataLength)
524 : {
525 : // We may encounter BUFFER_TOO_SMALL with nothing actually written in the case of list chunking, so we check whether we have
526 : // actually written any data.
527 1309 : attributeDataWritten = true;
528 : }
529 :
530 1986 : if (apHasEncodedData != nullptr)
531 : {
532 1986 : *apHasEncodedData = attributeDataWritten;
533 : }
534 : //
535 : // Running out of space is an error that we're expected to handle - the incompletely written DataIB has already been rolled back
536 : // earlier to ensure only whole and complete DataIBs are present in the stream.
537 : //
538 : // We can safely clear out the error so that the rest of the machinery to close out the reports, etc. will function correctly.
539 : // These are guaranteed to not fail since we've already reserved memory for the remaining 'close out' TLV operations in this
540 : // function and its callers.
541 : //
542 1986 : if (IsOutOfWriterSpaceError(err) && reservedEndOfReportIBs)
543 : {
544 416 : ChipLogDetail(DataManagement, "<RE:Run> We cannot put more chunks into this report. Enable chunking.");
545 416 : err = CHIP_NO_ERROR;
546 : }
547 :
548 : //
549 : // Only close out the report if we haven't hit an error yet so far.
550 : //
551 1986 : if (err == CHIP_NO_ERROR)
552 : {
553 1986 : attributeReportIBs.GetWriter()->UnreserveBuffer(kReservedSizeEndOfReportIBs);
554 :
555 1986 : err = attributeReportIBs.EndOfAttributeReportIBs();
556 :
557 : //
558 : // We reserved space for this earlier - consequently, the call to end the ReportIBs should
559 : // never fail, so assert if we do since that's a logic bug.
560 : //
561 1986 : VerifyOrDie(err == CHIP_NO_ERROR);
562 : }
563 :
564 : //
565 : // Roll back the entire ReportIB array if we never wrote any attributes
566 : // AND never hit an error.
567 : //
568 1986 : if (!attributeDataWritten && err == CHIP_NO_ERROR)
569 : {
570 677 : aReportDataBuilder.Rollback(backup);
571 : }
572 :
573 : // hasMoreChunks with no data encoded indicates that we have encountered some trouble when processing the attributes.
574 : // BuildAndSendSingleReportData will abort the read transaction if we encoded no attributes and no events but hasMoreChunks is
575 : // set.
576 1986 : if (apHasMoreChunks != nullptr)
577 : {
578 1986 : *apHasMoreChunks = hasMoreChunks;
579 : }
580 :
581 1986 : return err;
582 : }
583 :
584 864 : CHIP_ERROR Engine::CheckAccessDeniedEventPaths(TLV::TLVWriter & aWriter, bool & aHasEncodedData, ReadHandler * apReadHandler)
585 : {
586 : using Protocols::InteractionModel::Status;
587 :
588 864 : CHIP_ERROR err = CHIP_NO_ERROR;
589 1759 : for (auto current = apReadHandler->mpEventPathList; current != nullptr;)
590 : {
591 895 : if (current->mValue.IsWildcardPath())
592 : {
593 837 : current = current->mpNext;
594 837 : continue;
595 : }
596 :
597 58 : ConcreteEventPath path(current->mValue.mEndpointId, current->mValue.mClusterId, current->mValue.mEventId);
598 :
599 58 : StatusIB statusIB;
600 :
601 58 : ReturnErrorOnFailure(
602 : CheckEventValidity(path, apReadHandler->GetSubjectDescriptor(), mpImEngine->GetDataModelProvider(), statusIB));
603 :
604 58 : if (statusIB.IsFailure())
605 : {
606 3 : TLV::TLVWriter checkpoint = aWriter;
607 3 : err = EventReportIB::ConstructEventStatusIB(aWriter, path, statusIB);
608 3 : if (err != CHIP_NO_ERROR)
609 : {
610 0 : aWriter = checkpoint;
611 0 : break;
612 : }
613 3 : aHasEncodedData = true;
614 : }
615 :
616 58 : current = current->mpNext;
617 : }
618 :
619 864 : return err;
620 : }
621 :
622 1986 : CHIP_ERROR Engine::BuildSingleReportDataEventReports(ReportDataMessage::Builder & aReportDataBuilder, ReadHandler * apReadHandler,
623 : bool aBufferIsUsed, bool * apHasMoreChunks, bool * apHasEncodedData)
624 : {
625 1986 : CHIP_ERROR err = CHIP_NO_ERROR;
626 1986 : size_t eventCount = 0;
627 1986 : bool hasEncodedStatus = false;
628 1986 : TLV::TLVWriter backup;
629 1986 : bool eventClean = true;
630 1986 : auto & eventMin = apReadHandler->GetEventMin();
631 1986 : bool hasMoreChunks = false;
632 :
633 1986 : aReportDataBuilder.Checkpoint(backup);
634 :
635 1986 : VerifyOrExit(apReadHandler->GetEventPathList() != nullptr, );
636 :
637 : // If the mpEventManagement is not valid or has not been initialized,
638 : // skip the rest of processing
639 891 : VerifyOrExit(mpEventManagement != nullptr && mpEventManagement->IsValid(),
640 : ChipLogError(DataManagement, "EventManagement has not yet been initialized"));
641 :
642 888 : eventClean = apReadHandler->CheckEventClean(*mpEventManagement);
643 :
644 : // proceed only if there are new events.
645 888 : if (eventClean)
646 : {
647 24 : ExitNow(); // Read clean, move along
648 : }
649 :
650 : {
651 : // Just like what we do in BuildSingleReportDataAttributeReportIBs(), we need to reserve one byte for end of container tag
652 : // when encoding events to ensure we can close the container successfully.
653 864 : const uint32_t kReservedSizeEndOfReportIBs = 1;
654 864 : EventReportIBs::Builder & eventReportIBs = aReportDataBuilder.CreateEventReports();
655 864 : SuccessOrExit(err = aReportDataBuilder.GetError());
656 864 : VerifyOrExit(eventReportIBs.GetWriter() != nullptr, err = CHIP_ERROR_INCORRECT_STATE);
657 864 : SuccessOrExit(err = eventReportIBs.GetWriter()->ReserveBuffer(kReservedSizeEndOfReportIBs));
658 :
659 864 : err = CheckAccessDeniedEventPaths(*(eventReportIBs.GetWriter()), hasEncodedStatus, apReadHandler);
660 864 : SuccessOrExit(err);
661 :
662 864 : err = mpEventManagement->FetchEventsSince(*(eventReportIBs.GetWriter()), apReadHandler->GetEventPathList(), eventMin,
663 864 : eventCount, apReadHandler->GetSubjectDescriptor());
664 :
665 864 : if ((err == CHIP_END_OF_TLV) || (err == CHIP_ERROR_TLV_UNDERRUN) || (err == CHIP_NO_ERROR))
666 : {
667 349 : err = CHIP_NO_ERROR;
668 349 : hasMoreChunks = false;
669 : }
670 515 : else if (IsOutOfWriterSpaceError(err))
671 : {
672 : // When the first event is too big to fit in the packet, ignore that event.
673 : // However, if we have already encoded some attributes, we don't skip it in that case.
674 515 : if (eventCount == 0)
675 : {
676 206 : if (!aBufferIsUsed)
677 : {
678 0 : eventMin++;
679 : }
680 206 : ChipLogDetail(DataManagement, "<RE:Run> First event is too big to fit in the packet!");
681 206 : err = CHIP_NO_ERROR;
682 : }
683 : else
684 : {
685 : // `FetchEventsSince` has filled the available space
686 : // within the allowed buffer before it fit all the
687 : // available events. This is an expected condition,
688 : // so we do not propagate the error to higher levels;
689 : // instead, we terminate the event processing for now
690 309 : err = CHIP_NO_ERROR;
691 : }
692 515 : hasMoreChunks = true;
693 : }
694 : else
695 : {
696 : // All other errors are propagated to higher level.
697 : // Exiting here and returning an error will lead to
698 : // abandoning subscription.
699 0 : ExitNow();
700 : }
701 :
702 864 : SuccessOrExit(err = eventReportIBs.GetWriter()->UnreserveBuffer(kReservedSizeEndOfReportIBs));
703 864 : SuccessOrExit(err = eventReportIBs.EndOfEventReports());
704 : }
705 864 : ChipLogDetail(DataManagement, "Fetched %u events", static_cast<unsigned int>(eventCount));
706 :
707 0 : exit:
708 1986 : if (apHasEncodedData != nullptr)
709 : {
710 1986 : *apHasEncodedData = hasEncodedStatus || (eventCount != 0);
711 : }
712 :
713 : // Maybe encoding the attributes has already used up all space.
714 1986 : if ((err == CHIP_NO_ERROR || IsOutOfWriterSpaceError(err)) && !(hasEncodedStatus || (eventCount != 0)))
715 : {
716 1345 : aReportDataBuilder.Rollback(backup);
717 1345 : err = CHIP_NO_ERROR;
718 : }
719 :
720 : // hasMoreChunks with no data encoded indicates that we have encountered some trouble when processing the attributes.
721 : // BuildAndSendSingleReportData will abort the read transaction if we encoded no attributes and no events but hasMoreChunks is
722 : // set.
723 1986 : if (apHasMoreChunks != nullptr)
724 : {
725 1986 : *apHasMoreChunks = hasMoreChunks;
726 : }
727 1986 : return err;
728 : }
729 :
730 1986 : CHIP_ERROR Engine::BuildAndSendSingleReportData(ReadHandler * apReadHandler)
731 : {
732 1986 : CHIP_ERROR err = CHIP_NO_ERROR;
733 1986 : System::PacketBufferTLVWriter reportDataWriter;
734 1986 : ReportDataMessage::Builder reportDataBuilder;
735 1986 : System::PacketBufferHandle bufHandle = nullptr;
736 1986 : uint16_t reservedSize = 0;
737 1986 : bool hasMoreChunks = false;
738 1986 : bool needCloseReadHandler = false;
739 1986 : size_t reportBufferMaxSize = 0;
740 :
741 : // Reserved size for the MoreChunks boolean flag, which takes up 1 byte for the control tag and 1 byte for the context tag.
742 1986 : const uint32_t kReservedSizeForMoreChunksFlag = 1 + 1;
743 :
744 : // Reserved size for the uint8_t InteractionModelRevision field, which takes up 1 byte for the control tag, 1 byte for the
745 : // context tag, and 1 byte for the value.
746 1986 : const uint32_t kReservedSizeForIMRevision = 1 + 1 + 1;
747 :
748 : // Reserved size for the end of report message, which is an end-of-container (i.e 1 byte for the control tag).
749 1986 : const uint32_t kReservedSizeForEndOfReportMessage = 1;
750 :
751 : // Reserved size for an empty EventReportIBs, so we can at least check if there are any events need to be reported.
752 1986 : const uint32_t kReservedSizeForEventReportIBs = 3; // type, tag, end of container
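// Editorial note: with the constants above, the reservation made before encoding the report body is
// kReservedSizeForMoreChunksFlag (2) + kReservedSizeForIMRevision (3) + kReservedSizeForEndOfReportMessage (1)
// + kReservedSizeForEventReportIBs (3) = 9 bytes, in addition to the MIC reservation applied to the
// writer below.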
753 :
754 1986 : VerifyOrExit(apReadHandler != nullptr, err = CHIP_ERROR_INVALID_ARGUMENT);
755 1986 : VerifyOrExit(apReadHandler->GetSession() != nullptr, err = CHIP_ERROR_INCORRECT_STATE);
756 :
757 1986 : reportBufferMaxSize = apReadHandler->GetReportBufferMaxSize();
758 :
759 1986 : bufHandle = System::PacketBufferHandle::New(reportBufferMaxSize);
760 1986 : VerifyOrExit(!bufHandle.IsNull(), err = CHIP_ERROR_NO_MEMORY);
761 :
762 1986 : if (bufHandle->AvailableDataLength() > reportBufferMaxSize)
763 : {
764 0 : reservedSize = static_cast<uint16_t>(bufHandle->AvailableDataLength() - reportBufferMaxSize);
765 : }
766 :
767 1986 : reportDataWriter.Init(std::move(bufHandle));
768 :
769 : #if CONFIG_BUILD_FOR_HOST_UNIT_TEST
770 1986 : reportDataWriter.ReserveBuffer(mReservedSize);
771 : #endif
772 :
773 : // Always limit the size of the generated packet to fit within the max size returned by the ReadHandler regardless
774 : // of the available buffer capacity.
775 : // Also, we need to reserve some extra space for the MIC field.
776 1986 : reportDataWriter.ReserveBuffer(static_cast<uint32_t>(reservedSize + Crypto::CHIP_CRYPTO_AEAD_MIC_LENGTH_BYTES));
777 :
778 : // Create a report data.
779 1986 : err = reportDataBuilder.Init(&reportDataWriter);
780 1986 : SuccessOrExit(err);
781 :
782 1986 : if (apReadHandler->IsType(ReadHandler::InteractionType::Subscribe))
783 : {
784 : #if CHIP_CONFIG_ENABLE_ICD_SERVER
785 : // Notify the ICDManager that we are about to send a subscription report before we prepare the Report payload.
786 : // This allows the ICDManager to trigger any necessary updates and have the information in the report about to be sent.
787 : app::ICDNotifier::GetInstance().NotifySubscriptionReport();
788 : #endif // CHIP_CONFIG_ENABLE_ICD_SERVER
789 :
790 441 : SubscriptionId subscriptionId = 0;
791 441 : apReadHandler->GetSubscriptionId(subscriptionId);
792 441 : reportDataBuilder.SubscriptionId(subscriptionId);
793 : }
794 :
795 1986 : SuccessOrExit(err = reportDataWriter.ReserveBuffer(kReservedSizeForMoreChunksFlag + kReservedSizeForIMRevision +
796 : kReservedSizeForEndOfReportMessage + kReservedSizeForEventReportIBs));
797 :
798 : {
799 1986 : bool hasMoreChunksForAttributes = false;
800 1986 : bool hasMoreChunksForEvents = false;
801 1986 : bool hasEncodedAttributes = false;
802 1986 : bool hasEncodedEvents = false;
803 :
804 1986 : err = BuildSingleReportDataAttributeReportIBs(reportDataBuilder, apReadHandler, &hasMoreChunksForAttributes,
805 : &hasEncodedAttributes);
806 2017 : SuccessOrExit(err);
807 1986 : SuccessOrExit(err = reportDataWriter.UnreserveBuffer(kReservedSizeForEventReportIBs));
808 1986 : err = BuildSingleReportDataEventReports(reportDataBuilder, apReadHandler, hasEncodedAttributes, &hasMoreChunksForEvents,
809 : &hasEncodedEvents);
810 1986 : SuccessOrExit(err);
811 :
812 1986 : hasMoreChunks = hasMoreChunksForAttributes || hasMoreChunksForEvents;
813 :
814 1986 : if (!hasEncodedAttributes && !hasEncodedEvents && hasMoreChunks)
815 : {
816 31 : ChipLogError(DataManagement,
817 : "No data actually encoded but hasMoreChunks flag is set, close read handler! (attribute too big?)");
818 31 : err = apReadHandler->SendStatusReport(Protocols::InteractionModel::Status::ResourceExhausted);
819 31 : if (err == CHIP_NO_ERROR)
820 : {
821 31 : needCloseReadHandler = true;
822 : }
823 31 : ExitNow();
824 : }
825 : }
826 :
827 1955 : SuccessOrExit(err = reportDataBuilder.GetError());
828 1955 : SuccessOrExit(err = reportDataWriter.UnreserveBuffer(kReservedSizeForMoreChunksFlag + kReservedSizeForIMRevision +
829 : kReservedSizeForEndOfReportMessage));
830 1955 : if (hasMoreChunks)
831 : {
832 866 : reportDataBuilder.MoreChunkedMessages(true);
833 : }
834 1089 : else if (apReadHandler->IsType(ReadHandler::InteractionType::Read))
835 : {
836 706 : reportDataBuilder.SuppressResponse(true);
837 : }
838 :
839 1955 : reportDataBuilder.EndOfReportDataMessage();
840 :
841 : //
842 : // Since we've already reserved space for both the MoreChunked/SuppressResponse flags, as well as
843 : // the end-of-container flag for the end of the report, we should never hit an error closing out the message.
844 : //
845 1955 : VerifyOrDie(reportDataBuilder.GetError() == CHIP_NO_ERROR);
846 :
847 1955 : err = reportDataWriter.Finalize(&bufHandle);
848 1955 : SuccessOrExit(err);
849 :
850 1955 : ChipLogDetail(DataManagement, "<RE> Sending report (payload has %" PRIu32 " bytes)...", reportDataWriter.GetLengthWritten());
851 1955 : err = SendReport(apReadHandler, std::move(bufHandle), hasMoreChunks);
852 1955 : VerifyOrExit(err == CHIP_NO_ERROR,
853 : ChipLogError(DataManagement, "<RE> Error sending out report data with %" CHIP_ERROR_FORMAT "!", err.Format()));
854 :
855 1951 : ChipLogDetail(DataManagement, "<RE> ReportsInFlight = %" PRIu32 " with readHandler %" PRIu32 ", RE has %s", mNumReportsInFlight,
856 : mCurReadHandlerIdx, hasMoreChunks ? "more messages" : "no more messages");
857 :
858 0 : exit:
859 1986 : if (err != CHIP_NO_ERROR || (apReadHandler->IsType(ReadHandler::InteractionType::Read) && !hasMoreChunks) ||
860 : needCloseReadHandler)
861 : {
862 : //
863 : // In the case of successful report generation when we're on the last chunk of a read, we don't expect
864 : // any further activity on this exchange. The EC layer will automatically close our EC, so shut down the ReadHandler
865 : // gracefully.
866 : //
867 739 : apReadHandler->Close();
868 : }
869 :
870 3972 : return err;
871 1986 : }
872 :
873 1764 : void Engine::Run(System::Layer * aSystemLayer, void * apAppState)
874 : {
875 1764 : Engine * const pEngine = reinterpret_cast<Engine *>(apAppState);
876 1764 : pEngine->mRunScheduled = false;
877 1764 : pEngine->Run();
878 1764 : }
879 :
880 2161 : CHIP_ERROR Engine::ScheduleRun()
881 : {
882 2161 : if (IsRunScheduled())
883 : {
884 397 : return CHIP_NO_ERROR;
885 : }
886 :
887 1764 : Messaging::ExchangeManager * exchangeManager = mpImEngine->GetExchangeManager();
888 1764 : if (exchangeManager == nullptr)
889 : {
890 0 : return CHIP_ERROR_INCORRECT_STATE;
891 : }
892 1764 : SessionManager * sessionManager = exchangeManager->GetSessionManager();
893 1764 : if (sessionManager == nullptr)
894 : {
895 0 : return CHIP_ERROR_INCORRECT_STATE;
896 : }
897 1764 : System::Layer * systemLayer = sessionManager->SystemLayer();
898 1764 : if (systemLayer == nullptr)
899 : {
900 0 : return CHIP_ERROR_INCORRECT_STATE;
901 : }
902 1764 : ReturnErrorOnFailure(systemLayer->ScheduleWork(Run, this));
903 1764 : mRunScheduled = true;
904 1764 : return CHIP_NO_ERROR;
905 : }
906 :
907 2076 : void Engine::Run()
908 : {
909 2076 : uint32_t numReadHandled = 0;
910 :
911 : // We may be deallocating read handlers as we go. Track how many we had
912 : // initially, so we make sure to go through all of them.
913 2076 : size_t initialAllocated = mpImEngine->mReadHandlers.Allocated();
914 4300 : while ((mNumReportsInFlight < CHIP_IM_MAX_REPORTS_IN_FLIGHT) && (numReadHandled < initialAllocated))
915 : {
916 : ReadHandler * readHandler =
917 2228 : mpImEngine->ActiveHandlerAt(mCurReadHandlerIdx % (uint32_t) mpImEngine->mReadHandlers.Allocated());
918 2228 : VerifyOrDie(readHandler != nullptr);
919 :
920 2228 : if (readHandler->ShouldReportUnscheduled() || mpImEngine->GetReportScheduler()->IsReportableNow(readHandler))
921 : {
922 :
923 1985 : mRunningReadHandler = readHandler;
924 1985 : CHIP_ERROR err = BuildAndSendSingleReportData(readHandler);
925 1985 : mRunningReadHandler = nullptr;
926 1985 : if (err != CHIP_NO_ERROR)
927 : {
928 4 : return;
929 : }
930 : }
931 :
932 2224 : numReadHandled++;
933 : // If readHandler removed itself from our list, we also decremented
934 : // mCurReadHandlerIdx to account for that removal, so it's safe to
935 : // increment here.
936 2224 : mCurReadHandlerIdx++;
937 : }
938 :
939 : //
940 : // If our tracker has exceeded the bounds of the handler list, reset it back to 0.
941 : // This isn't strictly necessary, but does make it easier to debug issues in this code if they
942 : // do arise.
943 : //
944 2072 : if (mCurReadHandlerIdx >= mpImEngine->mReadHandlers.Allocated())
945 : {
946 2015 : mCurReadHandlerIdx = 0;
947 : }
948 :
949 2072 : bool allReadClean = true;
950 :
951 2072 : mpImEngine->mReadHandlers.ForEachActiveObject([&allReadClean](ReadHandler * handler) {
952 2878 : if (handler->IsDirty())
953 : {
954 868 : allReadClean = false;
955 868 : return Loop::Break;
956 : }
957 :
958 2010 : return Loop::Continue;
959 : });
960 :
961 2072 : if (allReadClean)
962 : {
963 1204 : ChipLogDetail(DataManagement, "All ReadHandler-s are clean, clear GlobalDirtySet");
964 :
965 1204 : mGlobalDirtySet.ReleaseAll();
966 : }
967 : }
968 :
969 276 : bool Engine::MergeOverlappedAttributePath(const AttributePathParams & aAttributePath)
970 : {
971 276 : return Loop::Break == mGlobalDirtySet.ForEachActiveObject([&](auto * path) {
972 214 : if (path->IsAttributePathSupersetOf(aAttributePath))
973 : {
974 112 : path->mGeneration = GetDirtySetGeneration();
975 112 : return Loop::Break;
976 : }
977 102 : if (aAttributePath.IsAttributePathSupersetOf(*path))
978 : {
979 : // TODO: the wildcard input path may be a superset of later paths in globalDirtySet; this is fine for now, since
980 : // when building a report we use the first path of globalDirtySet to compare against the paths read clients are
981 : // interested in.
982 : // It would be better to eliminate the duplicate wildcard paths in a follow-up.
983 2 : path->mGeneration = GetDirtySetGeneration();
984 2 : path->mEndpointId = aAttributePath.mEndpointId;
985 2 : path->mClusterId = aAttributePath.mClusterId;
986 2 : path->mListIndex = aAttributePath.mListIndex;
987 2 : path->mAttributeId = aAttributePath.mAttributeId;
988 2 : return Loop::Break;
989 : }
990 100 : return Loop::Continue;
991 276 : });
992 : }
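// Editorial note (illustrative example): MergeOverlappedAttributePath() above returns true when the new
// dirty path can be absorbed by, or can absorb, an existing entry. For instance:
//
//   existing { endpoint 1, cluster X, wildcard attribute }, new { endpoint 1, cluster X, attribute A }
//     -> the existing entry is a superset; only its generation is refreshed.
//   existing { endpoint 1, cluster X, attribute A }, new { endpoint 1, cluster X, wildcard attribute }
//     -> the new path is a superset; the existing entry is widened in place to the wildcard path.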
993 :
994 8 : bool Engine::ClearTombPaths()
995 : {
996 8 : bool pathReleased = false;
997 8 : mGlobalDirtySet.ForEachActiveObject([&](auto * path) {
998 64 : if (path->mGeneration == 0)
999 : {
1000 28 : mGlobalDirtySet.ReleaseObject(path);
1001 28 : pathReleased = true;
1002 : }
1003 64 : return Loop::Continue;
1004 : });
1005 8 : return pathReleased;
1006 : }
1007 :
1008 5 : bool Engine::MergeDirtyPathsUnderSameCluster()
1009 : {
1010 5 : mGlobalDirtySet.ForEachActiveObject([&](auto * outerPath) {
1011 40 : if (outerPath->HasWildcardClusterId() || outerPath->mGeneration == 0)
1012 : {
1013 14 : return Loop::Continue;
1014 : }
1015 26 : mGlobalDirtySet.ForEachActiveObject([&](auto * innerPath) {
1016 208 : if (innerPath == outerPath)
1017 : {
1018 26 : return Loop::Continue;
1019 : }
1020 : // We don't support paths with a wildcard endpoint + a concrete cluster in global dirty set, so we do a simple == check
1021 : // here.
1022 182 : if (innerPath->mEndpointId != outerPath->mEndpointId || innerPath->mClusterId != outerPath->mClusterId)
1023 : {
1024 168 : return Loop::Continue;
1025 : }
1026 14 : if (innerPath->mGeneration > outerPath->mGeneration)
1027 : {
1028 0 : outerPath->mGeneration = innerPath->mGeneration;
1029 : }
1030 14 : outerPath->SetWildcardAttributeId();
1031 :
1032 : // The object pool does not allow us to release objects in a nested iteration, so mark the path as a tomb by setting its
1033 : // generation to 0 and then clear it later.
1034 14 : innerPath->mGeneration = 0;
1035 14 : return Loop::Continue;
1036 : });
1037 26 : return Loop::Continue;
1038 : });
1039 :
1040 5 : return ClearTombPaths();
1041 : }
1042 :
1043 3 : bool Engine::MergeDirtyPathsUnderSameEndpoint()
1044 : {
1045 3 : mGlobalDirtySet.ForEachActiveObject([&](auto * outerPath) {
1046 24 : if (outerPath->HasWildcardEndpointId() || outerPath->mGeneration == 0)
1047 : {
1048 14 : return Loop::Continue;
1049 : }
1050 10 : mGlobalDirtySet.ForEachActiveObject([&](auto * innerPath) {
1051 80 : if (innerPath == outerPath)
1052 : {
1053 10 : return Loop::Continue;
1054 : }
1055 70 : if (innerPath->mEndpointId != outerPath->mEndpointId)
1056 : {
1057 56 : return Loop::Continue;
1058 : }
1059 14 : if (innerPath->mGeneration > outerPath->mGeneration)
1060 : {
1061 0 : outerPath->mGeneration = innerPath->mGeneration;
1062 : }
1063 14 : outerPath->SetWildcardClusterId();
1064 14 : outerPath->SetWildcardAttributeId();
1065 :
1066 : // The object pool does not allow us to release objects in a nested iteration, so mark the path as a tomb by setting its
1067 : // generation to 0 and then clear it later.
1068 14 : innerPath->mGeneration = 0;
1069 14 : return Loop::Continue;
1070 : });
1071 10 : return Loop::Continue;
1072 : });
1073 3 : return ClearTombPaths();
1074 : }
1075 :
1076 189 : CHIP_ERROR Engine::InsertPathIntoDirtySet(const AttributePathParams & aAttributePath)
1077 : {
1078 189 : VerifyOrReturnError(!MergeOverlappedAttributePath(aAttributePath), CHIP_NO_ERROR);
1079 :
1080 82 : if (mGlobalDirtySet.Exhausted() && !MergeDirtyPathsUnderSameCluster() && !MergeDirtyPathsUnderSameEndpoint())
1081 : {
1082 1 : ChipLogDetail(DataManagement, "Global dirty set pool exhausted, merge all paths.");
1083 1 : mGlobalDirtySet.ReleaseAll();
1084 1 : auto object = mGlobalDirtySet.CreateObject();
1085 1 : object->mGeneration = GetDirtySetGeneration();
1086 : }
1087 :
1088 82 : VerifyOrReturnError(!MergeOverlappedAttributePath(aAttributePath), CHIP_NO_ERROR);
1089 79 : ChipLogDetail(DataManagement, "Cannot merge the new path into any existing path, create one.");
1090 :
1091 79 : auto object = mGlobalDirtySet.CreateObject();
1092 79 : if (object == nullptr)
1093 : {
1094 : // This should not happen, this path should be merged into the wildcard endpoint at least.
1095 0 : ChipLogError(DataManagement, "mGlobalDirtySet pool full, cannot handle more entries!");
1096 0 : return CHIP_ERROR_NO_MEMORY;
1097 : }
1098 79 : *object = aAttributePath;
1099 79 : object->mGeneration = GetDirtySetGeneration();
1100 :
1101 79 : return CHIP_NO_ERROR;
1102 : }
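// Editorial summary (illustrative, not part of the instrumented source): InsertPathIntoDirtySet() above
// escalates as the dirty-set pool fills up:
//
//   1. Try to merge the new path into an overlapping entry (MergeOverlappedAttributePath).
//   2. If the pool is exhausted, coalesce entries sharing a cluster, then entries sharing an endpoint.
//   3. If neither coalescing frees an entry, release everything and fall back to a single full-wildcard
//      entry, which effectively marks all attributes dirty.
//   4. Retry the merge; if the path still cannot be merged, allocate a new entry for it.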
1103 :
1104 5412 : CHIP_ERROR Engine::SetDirty(const AttributePathParams & aAttributePath)
1105 : {
1106 5412 : BumpDirtySetGeneration();
1107 :
1108 5412 : bool intersectsInterestPath = false;
1109 5412 : DataModel::Provider * dataModel = mpImEngine->GetDataModelProvider();
1110 5412 : mpImEngine->mReadHandlers.ForEachActiveObject([&dataModel, &aAttributePath, &intersectsInterestPath](ReadHandler * handler) {
1111 : // We call AttributePathIsDirty for both read interactions and subscribe interactions, since we may send inconsistent
1112 : // attribute data between two chunks. AttributePathIsDirty will not schedule a new run for read handlers which are
1113 : // waiting for a response to the last message chunk for read interactions.
1114 477 : if (handler->CanStartReporting() || handler->IsAwaitingReportResponse())
1115 : {
1116 934 : for (auto object = handler->GetAttributePathList(); object != nullptr; object = object->mpNext)
1117 : {
1118 802 : if (object->mValue.Intersects(aAttributePath))
1119 : {
1120 345 : handler->AttributePathIsDirty(dataModel, aAttributePath);
1121 345 : intersectsInterestPath = true;
1122 345 : break;
1123 : }
1124 : }
1125 : }
1126 :
1127 477 : return Loop::Continue;
1128 : });
1129 :
1130 5412 : if (!intersectsInterestPath)
1131 : {
1132 5228 : return CHIP_NO_ERROR;
1133 : }
1134 184 : ReturnErrorOnFailure(InsertPathIntoDirtySet(aAttributePath));
1135 :
1136 184 : return CHIP_NO_ERROR;
1137 : }
1138 :
1139 1955 : CHIP_ERROR Engine::SendReport(ReadHandler * apReadHandler, System::PacketBufferHandle && aPayload, bool aHasMoreChunks)
1140 : {
1141 1955 : CHIP_ERROR err = CHIP_NO_ERROR;
1142 :
1143 : // We can only have 1 report in flight for any given read - increment and break out.
1144 1955 : mNumReportsInFlight++;
1145 1955 : err = apReadHandler->SendReportData(std::move(aPayload), aHasMoreChunks);
1146 1955 : if (err != CHIP_NO_ERROR)
1147 : {
1148 4 : --mNumReportsInFlight;
1149 : }
1150 1955 : return err;
1151 : }
1152 :
1153 1951 : void Engine::OnReportConfirm()
1154 : {
1155 1951 : VerifyOrDie(mNumReportsInFlight > 0);
1156 :
1157 1951 : if (mNumReportsInFlight == CHIP_IM_MAX_REPORTS_IN_FLIGHT)
1158 : {
1159 : // We could have other things waiting to go now that this report is no
1160 : // longer in flight.
1161 61 : ScheduleRun();
1162 : }
1163 1951 : mNumReportsInFlight--;
1164 1951 : ChipLogDetail(DataManagement, "<RE> OnReportConfirm: NumReports = %" PRIu32, mNumReportsInFlight);
1165 1951 : }
1166 :
1167 20 : void Engine::GetMinEventLogPosition(uint32_t & aMinLogPosition)
1168 : {
1169 20 : mpImEngine->mReadHandlers.ForEachActiveObject([&aMinLogPosition](ReadHandler * handler) {
1170 20 : if (handler->IsType(ReadHandler::InteractionType::Read))
1171 : {
1172 0 : return Loop::Continue;
1173 : }
1174 :
1175 20 : uint32_t initialWrittenEventsBytes = handler->GetLastWrittenEventsBytes();
1176 20 : if (initialWrittenEventsBytes < aMinLogPosition)
1177 : {
1178 20 : aMinLogPosition = initialWrittenEventsBytes;
1179 : }
1180 :
1181 20 : return Loop::Continue;
1182 : });
1183 20 : }
1184 :
1185 20 : CHIP_ERROR Engine::ScheduleBufferPressureEventDelivery(uint32_t aBytesWritten)
1186 : {
1187 20 : uint32_t minEventLogPosition = aBytesWritten;
1188 20 : GetMinEventLogPosition(minEventLogPosition);
1189 20 : if (aBytesWritten - minEventLogPosition > CHIP_CONFIG_EVENT_LOGGING_BYTE_THRESHOLD)
1190 : {
1191 0 : ChipLogDetail(DataManagement, "<RE> Buffer overfilled CHIP_CONFIG_EVENT_LOGGING_BYTE_THRESHOLD %d, schedule engine run",
1192 : CHIP_CONFIG_EVENT_LOGGING_BYTE_THRESHOLD);
1193 0 : return ScheduleRun();
1194 : }
1195 20 : return CHIP_NO_ERROR;
1196 : }
1197 :
1198 665 : CHIP_ERROR Engine::NewEventGenerated(ConcreteEventPath & aPath, uint32_t aBytesConsumed)
1199 : {
1200 : // If we have no read handlers right now that care about any events,
1201 : // we don't need to schedule a run for events.
1202 : // Even if a run were scheduled, we would not deliver any events anyway,
1203 : // so this simply saves one schedule run.
1204 665 : if (mpImEngine->mEventPathPool.Allocated() == 0)
1205 : {
1206 633 : return CHIP_NO_ERROR;
1207 : }
1208 :
1209 32 : bool isUrgentEvent = false;
1210 32 : mpImEngine->mReadHandlers.ForEachActiveObject([&aPath, &isUrgentEvent](ReadHandler * handler) {
1211 40 : if (handler->IsType(ReadHandler::InteractionType::Read))
1212 : {
1213 0 : return Loop::Continue;
1214 : }
1215 :
1216 104 : for (auto * interestedPath = handler->GetEventPathList(); interestedPath != nullptr;
1217 64 : interestedPath = interestedPath->mpNext)
1218 : {
1219 76 : if (interestedPath->mValue.IsEventPathSupersetOf(aPath) && interestedPath->mValue.mIsUrgentEvent)
1220 : {
1221 12 : isUrgentEvent = true;
1222 12 : handler->ForceDirtyState();
1223 12 : break;
1224 : }
1225 : }
1226 :
1227 40 : return Loop::Continue;
1228 : });
1229 :
1230 32 : if (isUrgentEvent)
1231 : {
1232 12 : ChipLogDetail(DataManagement, "Urgent event will be sent once reporting is not blocked by the min interval");
1233 12 : return CHIP_NO_ERROR;
1234 : }
1235 :
1236 20 : return ScheduleBufferPressureEventDelivery(aBytesConsumed);
1237 : }
1238 :
1239 312 : void Engine::ScheduleUrgentEventDeliverySync(Optional<FabricIndex> fabricIndex)
1240 : {
1241 312 : mpImEngine->mReadHandlers.ForEachActiveObject([fabricIndex](ReadHandler * handler) {
1242 0 : if (handler->IsType(ReadHandler::InteractionType::Read))
1243 : {
1244 0 : return Loop::Continue;
1245 : }
1246 :
1247 0 : if (fabricIndex.HasValue() && fabricIndex.Value() != handler->GetAccessingFabricIndex())
1248 : {
1249 0 : return Loop::Continue;
1250 : }
1251 :
1252 0 : handler->ForceDirtyState();
1253 :
1254 0 : return Loop::Continue;
1255 : });
1256 :
1257 312 : Run();
1258 312 : }
1259 :
1260 5146 : void Engine::MarkDirty(const AttributePathParams & path)
1261 : {
1262 5146 : CHIP_ERROR err = SetDirty(path);
1263 5146 : if (err != CHIP_NO_ERROR)
1264 : {
1265 0 : ChipLogError(DataManagement, "Failed to set path dirty: %" CHIP_ERROR_FORMAT, err.Format());
1266 : }
1267 5146 : }
1268 :
1269 : } // namespace reporting
1270 : } // namespace app
1271 : } // namespace chip
|