blob: 3511cc1de6517aa8d3e9c79534ec02b1e00340a8 [file] [log] [blame]
Avi Drissman64595482022-09-14 20:52:291// Copyright 2012 The Chromium Authors
license.botbf09a502008-08-24 00:55:552// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
initial.commit586acc5fe2008-07-26 22:42:524
[email protected]c2c5cfc2014-03-03 16:35:285#include "net/disk_cache/blockfile/entry_impl.h"
initial.commit586acc5fe2008-07-26 22:42:526
Maks Orlovicha00fe582025-06-12 22:17:207#include <algorithm>
avid0181f32015-12-10 19:41:478#include <limits>
Peter Boström8a7540692021-04-05 20:48:209#include <memory>
avid0181f32015-12-10 19:41:4710
Maks Orlovich98c00d322025-05-30 14:44:4411#include "base/compiler_specific.h"
Piotr Bialecki62eb5462024-12-10 21:00:3012#include "base/containers/heap_array.h"
Yutaka Hirano238ce652022-04-19 05:20:4013#include "base/files/file_util.h"
Daniel Chengc0581992019-03-29 04:52:5614#include "base/hash/hash.h"
Matthew Dentona21ca7b2019-03-13 19:02:3215#include "base/numerics/safe_math.h"
Maks Orlovich955b5a72025-06-11 21:46:3216#include "base/strings/cstring_view.h"
[email protected]be528af2013-06-11 07:39:4817#include "base/strings/string_util.h"
Daniel Cheng17390fd2025-06-07 06:38:2618#include "base/strings/string_view_util.h"
Gabriel Charetted87f10f2022-03-31 00:44:2219#include "base/time/time.h"
[email protected]74a85ce2009-02-12 00:03:1920#include "net/base/io_buffer.h"
initial.commit586acc5fe2008-07-26 22:42:5221#include "net/base/net_errors.h"
[email protected]c2c5cfc2014-03-03 16:35:2822#include "net/disk_cache/blockfile/backend_impl.h"
23#include "net/disk_cache/blockfile/bitmap.h"
24#include "net/disk_cache/blockfile/disk_format.h"
[email protected]c2c5cfc2014-03-03 16:35:2825#include "net/disk_cache/blockfile/sparse_control.h"
[email protected]a88d601f2008-08-15 20:36:2126#include "net/disk_cache/cache_util.h"
[email protected]9eb8cdf2011-03-17 18:53:0227#include "net/disk_cache/net_log_parameters.h"
mikecironef22f9812016-10-04 03:40:1928#include "net/log/net_log.h"
mikecirone8b85c432016-09-08 19:11:0029#include "net/log/net_log_event_type.h"
30#include "net/log/net_log_source_type.h"
initial.commit586acc5fe2008-07-26 22:42:5231
[email protected]e1acf6f2008-10-27 20:43:3332using base::Time;
[email protected]b36a7bd2010-02-22 23:14:2333using base::TimeTicks;
[email protected]e1acf6f2008-10-27 20:43:3334
namespace {

// Index for the file used to store the key, if any (files_[kKeyFileIndex]).
const int kKeyFileIndex = 3;

// This class implements FileIOCallback to buffer the callback from a file IO
// operation from the actual net class.
//
// Lifetime: the object is self-owning. It is created per IO operation,
// balances the entry's IO count (incremented in the constructor, decremented
// in OnFileIOComplete()), and deletes itself at the end of
// OnFileIOComplete().
class SyncCallback: public disk_cache::FileIOCallback {
 public:
  // |end_event_type| is the event type to log on completion. Logs nothing on
  // discard, or when the NetLog is not set to log all events.
  SyncCallback(scoped_refptr<disk_cache::EntryImpl> entry,
               net::IOBuffer* buffer,
               net::CompletionOnceCallback callback,
               net::NetLogEventType end_event_type)
      : entry_(std::move(entry)),
        callback_(std::move(callback)),
        buf_(buffer),
        end_event_type_(end_event_type) {
    entry_->IncrementIoCount();
  }

  SyncCallback(const SyncCallback&) = delete;
  SyncCallback& operator=(const SyncCallback&) = delete;

  ~SyncCallback() override = default;

  // disk_cache::FileIOCallback implementation. Runs |callback_| (unless
  // discarded) and then destroys |this|.
  void OnFileIOComplete(int bytes_copied) override;

  // Drops the user callback so completion only balances the IO count and
  // frees this object; nothing is logged or reported to the caller.
  void Discard();

 private:
  scoped_refptr<disk_cache::EntryImpl> entry_;
  net::CompletionOnceCallback callback_;
  scoped_refptr<net::IOBuffer> buf_;  // Keeps the user's buffer alive.
  const net::NetLogEventType end_event_type_;
};

void SyncCallback::OnFileIOComplete(int bytes_copied) {
  entry_->DecrementIoCount();
  if (!callback_.is_null()) {
    if (entry_->net_log().IsCapturing()) {
      disk_cache::NetLogReadWriteComplete(entry_->net_log(), end_event_type_,
                                          net::NetLogEventPhase::END,
                                          bytes_copied);
    }
    buf_ = nullptr;  // Release the buffer before invoking the callback.
    std::move(callback_).Run(bytes_copied);
  }
  delete this;  // Self-owning: see class comment.
}

void SyncCallback::Discard() {
  callback_.Reset();
  buf_ = nullptr;
  // Reuse the completion path to decrement the IO count and self-delete.
  OnFileIOComplete(0);
}

const int kMaxBufferSize = 1024 * 1024;  // 1 MB.

}  // namespace
95
96namespace disk_cache {
97
// This class handles individual memory buffers that store data before it is
// sent to disk. The buffer can start at any offset, but if we try to write to
// anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to
// zero. The buffer grows up to a size determined by the backend, to keep the
// total memory used under control.
class EntryImpl::UserBuffer {
 public:
  explicit UserBuffer(BackendImpl* backend) : backend_(backend->GetWeakPtr()) {
    // The first kMaxBlockSize of capacity is "free": the backend is only
    // charged for growth beyond it (see ~UserBuffer and GrowBuffer).
    buffer_.reserve(kMaxBlockSize);
  }

  UserBuffer(const UserBuffer&) = delete;
  UserBuffer& operator=(const UserBuffer&) = delete;

  ~UserBuffer() {
    if (backend_.get())
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
  }

  // Returns true if we can handle writing |len| bytes to |offset|.
  bool PreWrite(int offset, int len);

  // Truncates the buffer to |offset| bytes.
  void Truncate(int offset);

  // Writes |len| bytes from |buf| at the given |offset|.
  void Write(int offset, IOBuffer* buf, int len);

  // Returns true if we can read |len| bytes from |offset|, given that the
  // actual file has |eof| bytes stored. Note that the number of bytes to read
  // may be modified by this method even though it returns false: that means we
  // should do a smaller read from disk.
  bool PreRead(int eof, int offset, int* len);

  // Reads |len| bytes into |buf| starting at the given |offset|.
  int Read(int offset, IOBuffer* buf, int len);

  // Prepare this buffer for reuse.
  void Reset();

  base::span<uint8_t> as_span() { return buffer_; }

  // Size/Start/End are in file coordinates: the buffer covers
  // [Start(), End()) of the stream.
  int Size() { return static_cast<int>(buffer_.size()); }
  int Start() { return offset_; }
  int End() { return offset_ + Size(); }

 private:
  int capacity() { return static_cast<int>(buffer_.capacity()); }
  bool GrowBuffer(int required, int limit);

  base::WeakPtr<BackendImpl> backend_;
  int offset_ = 0;  // File offset where this buffer starts.
  std::vector<uint8_t> buffer_;
  bool grow_allowed_ = true;  // False once the backend denies an allocation.
};
153
// Decides whether a write of |len| bytes at |offset| can be buffered,
// growing the backing vector if the backend allows it.
bool EntryImpl::UserBuffer::PreWrite(int offset, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);

  // We don't want to write before our current start.
  if (offset < offset_)
    return false;

  // Lets get the common case out of the way.
  if (offset + len <= capacity())
    return true;

  // If we are writing to the first 16K (kMaxBlockSize), we want to keep the
  // buffer offset_ at 0.
  // An empty buffer writing past 16K will float (offset_ becomes |offset| in
  // Write()), so only |len| bytes of storage are needed.
  if (!Size() && offset > kMaxBlockSize)
    return GrowBuffer(len, kMaxBufferSize);

  // Anchored buffer: it must span from offset_ to the end of the write. The
  // slightly larger limit (6/5) leaves slack for growth past the 1 MB cap.
  int required = offset - offset_ + len;
  return GrowBuffer(required, kMaxBufferSize * 6 / 5);
}
175
176void EntryImpl::UserBuffer::Truncate(int offset) {
177 DCHECK_GE(offset, 0);
178 DCHECK_GE(offset, offset_);
[email protected]42d5be692010-11-04 17:34:38179 DVLOG(3) << "Buffer truncate at " << offset << " current " << offset_;
[email protected]e1fcf142010-08-23 18:47:25180
181 offset -= offset_;
182 if (Size() >= offset)
183 buffer_.resize(offset);
184}
185
// Copies |len| bytes from |buf| into the buffer at file offset |offset|,
// overwriting existing bytes and appending the remainder. PreWrite() must
// have approved this write, so capacity is already sufficient.
void EntryImpl::UserBuffer::Write(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);

  // 0-length writes that don't extend can just be ignored here, and are safe
  // even if they're before offset_, as truncates are handled elsewhere.
  if (len == 0 && offset < End())
    return;

  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer write at " << offset << " current " << offset_;

  // First write to an empty buffer past 16K: float the buffer to |offset|.
  if (!Size() && offset > kMaxBlockSize)
    offset_ = offset;

  offset -= offset_;  // From here on, |offset| is an index into buffer_.

  // Writing past the current end: zero-fill the gap.
  if (offset > Size())
    buffer_.resize(offset);

  if (!len)
    return;

  base::span<uint8_t> in_buf = buf->first(len);
  // Overwrite the part that overlaps existing data...
  int valid_len = Size() - offset;
  int copy_len = std::min(valid_len, len);
  if (copy_len) {
    size_t sz_offset = base::checked_cast<size_t>(offset);
    size_t sz_len = base::checked_cast<size_t>(copy_len);

    base::span(buffer_)
        .subspan(sz_offset, sz_len)
        .copy_from_nonoverlapping(in_buf.take_first(sz_len));
  }
  if (in_buf.empty()) {
    return;
  }

  // ...and append whatever extends beyond the old end.
  buffer_.insert(buffer_.end(), in_buf.begin(), in_buf.end());
}
227
// Decides whether a read of |*len| bytes at |offset| can be served from this
// buffer. Returns true if the buffer can satisfy (at least the start of) the
// read; returns false when the caller must hit the disk, possibly after this
// method has clamped |*len| to the portion that lives on disk.
bool EntryImpl::UserBuffer::PreRead(int eof, int offset, int* len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(*len, 0);

  if (offset < offset_) {
    // We are reading before this buffer.
    // Past the file's EOF there is nothing on disk either; Read() will
    // zero-fill, so the buffer "handles" it.
    if (offset >= eof)
      return true;

    // If the read overlaps with the buffer, change its length so that there is
    // no overlap.
    *len = std::min(*len, offset_ - offset);
    *len = std::min(*len, eof - offset);

    // We should read from disk.
    return false;
  }

  if (!Size())
    return false;

  // See if we can fulfill the first part of the operation.
  return (offset - offset_ < Size());
}
252
// Copies up to |len| bytes at file offset |offset| into |buf|. Bytes before
// the buffer's start are zero-filled (there is no backing file for them).
// Returns the number of bytes produced (zero-fill + copied).
int EntryImpl::UserBuffer::Read(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(len, 0);
  DCHECK(Size() || offset < offset_);

  base::span<uint8_t> dest = buf->span();

  int clean_bytes = 0;
  if (offset < offset_) {
    // We don't have a file so lets fill the first part with 0.
    clean_bytes = std::min(offset_ - offset, len);
    std::ranges::fill(dest.take_first(base::checked_cast<size_t>(clean_bytes)),
                      0);
    if (len == clean_bytes)
      return len;
    // Continue the copy from the start of the buffered region.
    offset = offset_;
    len -= clean_bytes;
  }

  int start = offset - offset_;  // Index into buffer_.
  int available = Size() - start;
  DCHECK_GE(start, 0);
  DCHECK_GE(available, 0);
  len = std::min(len, available);
  size_t sz_len = base::checked_cast<size_t>(len);
  size_t sz_start = base::checked_cast<size_t>(start);
  dest.first(sz_len).copy_from_nonoverlapping(
      base::span(buffer_).subspan(sz_start, sz_len));
  return len + clean_bytes;
}
283
// Prepares the buffer for reuse. If a previous grow was denied, the backing
// storage is actually released (swap with a fresh vector) and the backend's
// accounting is settled, so a future grow can be attempted again.
void EntryImpl::UserBuffer::Reset() {
  if (!grow_allowed_) {
    if (backend_.get())
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
    grow_allowed_ = true;
    // clear() would keep the capacity; swapping with a temporary releases it.
    std::vector<uint8_t> tmp;
    buffer_.swap(tmp);
    buffer_.reserve(kMaxBlockSize);
  }
  offset_ = 0;
  buffer_.clear();
}
296
// Grows capacity to at least |required| bytes (never above |limit|), asking
// the backend for permission so total buffer memory stays bounded. Returns
// false if the request is over |limit|, the backend is gone, or the backend
// denies the allocation (recorded in grow_allowed_).
bool EntryImpl::UserBuffer::GrowBuffer(int required, int limit) {
  DCHECK_GE(required, 0);
  int current_size = capacity();
  if (required <= current_size)
    return true;

  if (required > limit)
    return false;

  if (!backend_.get())
    return false;

  // Grow in big steps: at least 64K (4 blocks) and at least double the
  // current capacity, to amortize reallocations.
  int to_add = std::max(required - current_size, kMaxBlockSize * 4);
  to_add = std::max(current_size, to_add);
  required = std::min(current_size + to_add, limit);

  grow_allowed_ = backend_->IsAllocAllowed(current_size, required);
  if (!grow_allowed_)
    return false;

  DVLOG(3) << "Buffer grow to " << required;

  buffer_.reserve(required);
  return true;
}
322
323// ------------------------------------------------------------------------
324
// Binds this entry to its on-disk block at |address|. The storage is mapped
// lazily; node_ stays unbound until LoadNodeAddress() is called.
EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only)
    : entry_(nullptr, Addr(0)),
      node_(nullptr, Addr(0)),
      backend_(backend->GetWeakPtr()),
      read_only_(read_only) {
  entry_.LazyInit(backend->File(address), address);
}
332
// Marks this entry as doomed in the backend. No-op if already doomed or the
// backend has been destroyed (weak pointer cleared).
void EntryImpl::DoomImpl() {
  if (doomed_ || !backend_.get())
    return;

  // Flag the rankings node as dirty for the current session before handing
  // the entry to the backend for actual doom processing.
  SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
  backend_->InternalDoomEntry(this);
}
340
// Reads |buf_len| bytes of stream |index| at |offset| into |buf|, wrapping
// InternalReadData() with BEGIN/END NetLog events. Returns bytes read, a
// net error, or ERR_IO_PENDING (in which case END is logged on completion).
int EntryImpl::ReadDataImpl(int index,
                            int offset,
                            IOBuffer* buf,
                            int buf_len,
                            CompletionOnceCallback callback) {
  if (net_log_.IsCapturing()) {
    NetLogReadWriteData(net_log_, net::NetLogEventType::ENTRY_READ_DATA,
                        net::NetLogEventPhase::BEGIN, index, offset, buf_len,
                        false);
  }

  int result =
      InternalReadData(index, offset, buf, buf_len, std::move(callback));

  if (result != net::ERR_IO_PENDING && net_log_.IsCapturing()) {
    NetLogReadWriteComplete(net_log_, net::NetLogEventType::ENTRY_READ_DATA,
                            net::NetLogEventPhase::END, result);
  }
  return result;
}
361
// Writes |buf_len| bytes of |buf| to stream |index| at |offset|, wrapping
// InternalWriteData() with BEGIN/END NetLog events. |truncate| cuts the
// stream at the end of this write. Returns bytes written, a net error, or
// ERR_IO_PENDING (END is then logged when the operation completes).
int EntryImpl::WriteDataImpl(int index,
                             int offset,
                             IOBuffer* buf,
                             int buf_len,
                             CompletionOnceCallback callback,
                             bool truncate) {
  if (net_log_.IsCapturing()) {
    NetLogReadWriteData(net_log_, net::NetLogEventType::ENTRY_WRITE_DATA,
                        net::NetLogEventPhase::BEGIN, index, offset, buf_len,
                        truncate);
  }

  int result = InternalWriteData(index, offset, buf, buf_len,
                                 std::move(callback), truncate);

  if (result != net::ERR_IO_PENDING && net_log_.IsCapturing()) {
    NetLogReadWriteComplete(net_log_, net::NetLogEventType::ENTRY_WRITE_DATA,
                            net::NetLogEventPhase::END, result);
  }
  return result;
}
383
// Reads sparse data at |offset|, lazily initializing the sparse-control
// machinery on first use. Returns a net error from InitSparseData() on
// failure, otherwise whatever SparseControl::StartIO() returns.
int EntryImpl::ReadSparseDataImpl(int64_t offset,
                                  IOBuffer* buf,
                                  int buf_len,
                                  CompletionOnceCallback callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
                            std::move(callback));
  return result;
}
397
// Writes sparse data at |offset|, lazily initializing the sparse-control
// machinery on first use. Returns a net error from InitSparseData() on
// failure, otherwise whatever SparseControl::StartIO() returns.
int EntryImpl::WriteSparseDataImpl(int64_t offset,
                                   IOBuffer* buf,
                                   int buf_len,
                                   CompletionOnceCallback callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
                            buf_len, std::move(callback));
  return result;
}
411
Maks Orlovich04cd1ad2021-07-02 17:32:24412RangeResult EntryImpl::GetAvailableRangeImpl(int64_t offset, int len) {
[email protected]2a5ee412010-07-17 01:03:10413 int result = InitSparseData();
414 if (net::OK != result)
Maks Orlovich04cd1ad2021-07-02 17:32:24415 return RangeResult(static_cast<net::Error>(result));
[email protected]2a5ee412010-07-17 01:03:10416
Maks Orlovich04cd1ad2021-07-02 17:32:24417 return sparse_->GetAvailableRange(offset, len);
[email protected]2a5ee412010-07-17 01:03:10418}
419
[email protected]fb2622f2010-07-13 18:00:56420void EntryImpl::CancelSparseIOImpl() {
[email protected]06e62ba2009-10-08 23:07:39421 if (!sparse_.get())
422 return;
423
424 sparse_->CancelIO();
425}
426
// Forwards to SparseControl to find out when sparse IO can proceed; requires
// that sparse support has already been initialized.
int EntryImpl::ReadyForSparseIOImpl(CompletionOnceCallback callback) {
  DCHECK(sparse_.get());
  return sparse_->ReadyToUse(std::move(callback));
}
431
// Returns the key hash stored in the on-disk entry record.
uint32_t EntryImpl::GetHash() {
  return entry_.Data()->hash;
}
435
// Initializes a brand-new entry: zeroes the records, links entry and
// rankings node to each other, and stores |key| either inline (short keys)
// or in a dedicated block/file (keys longer than kMaxInternalKeyLength).
// Returns false on any allocation or IO failure, cleaning up the key block.
bool EntryImpl::CreateEntry(Addr node_address,
                            const std::string& key,
                            uint32_t hash) {
  EntryStore* entry_store = entry_.Data();
  RankingsNode* node = node_.Data();
  *node = RankingsNode();
  std::ranges::fill(base::as_writable_bytes(entry_.AllData()), 0);

  if (!node_.LazyInit(backend_->File(node_address), node_address))
    return false;

  // Cross-link the two records so either can locate the other.
  entry_store->rankings_node = node_address.value();
  node->contents = entry_.address().value();

  entry_store->hash = hash;
  entry_store->creation_time = Time::Now().ToInternalValue();
  entry_store->key_len = static_cast<int32_t>(key.size());
  if (entry_store->key_len > kMaxInternalKeyLength) {
    // External key: allocate a block (+1 for the NUL terminator).
    Addr address(0);
    if (!CreateBlock(entry_store->key_len + 1, &address))
      return false;

    entry_store->long_key = address.value();
    File* key_file = GetBackingFile(address, kKeyFileIndex);
    key_ = key;  // Cache the key so GetKey() doesn't have to read it back.

    size_t offset = 0;
    if (address.is_block_file())
      offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

    // Write the key including its trailing NUL.
    if (!key_file ||
        !key_file->Write(
            base::byte_span_with_nul_from_cstring_view(base::cstring_view(key)),
            offset)) {
      DeleteData(address, kKeyFileIndex);
      return false;
    }

    if (address.is_separate_file())
      key_file->SetLength(key.size() + 1);
  } else {
    // Internal key: copy into the entry record itself, NUL terminated.
    auto internal_key = InternalKeySpan();
    internal_key.copy_prefix_from(key);
    internal_key.at(key.size()) = '\0';
  }
  backend_->ModifyStorageSize(0, static_cast<int32_t>(key.size()));
  node->dirty = backend_->GetCurrentEntryId();
  return true;
}
485
avid0181f32015-12-10 19:41:47486bool EntryImpl::IsSameEntry(const std::string& key, uint32_t hash) {
[email protected]2b1d4f22008-08-19 23:03:40487 if (entry_.Data()->hash != hash ||
488 static_cast<size_t>(entry_.Data()->key_len) != key.size())
[email protected]bdad42a42008-07-31 21:28:48489 return false;
490
[email protected]f76184a2012-09-19 05:28:58491 return (key.compare(GetKey()) == 0);
[email protected]bdad42a42008-07-31 21:28:48492}
493
// Marks the entry as doomed: flags the rankings node dirty for this session
// (persisted immediately) and remembers the doomed state in memory.
void EntryImpl::InternalDoom() {
  net_log_.AddEvent(net::NetLogEventType::ENTRY_DOOM);
  DCHECK(node_.HasData());
  if (!node_.Data()->dirty) {
    node_.Data()->dirty = backend_->GetCurrentEntryId();
    node_.Store();
  }
  doomed_ = true;
}
503
// Releases this entry's storage. With |everything| false only the data
// streams are deleted; with true (requires doomed_) the key, the entry block
// and possibly the rankings node are removed as well.
void EntryImpl::DeleteEntryData(bool everything) {
  DCHECK(doomed_ || !everything);

  if (GetEntryFlags() & PARENT_ENTRY) {
    // We have some child entries that must go away.
    SparseControl::DeleteChildren(this);
  }

  for (int index = 0; index < kNumStreams; index++) {
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized()) {
      // Adjust accounting before dropping the stream; unreported_size_ holds
      // growth the backend hasn't been told about yet.
      backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                  unreported_size_[index], 0);
      entry_.Data()->data_addr[index] = 0;
      entry_.Data()->data_size[index] = 0;
      // Persist the cleared pointers before deleting the blocks, so a crash
      // can't leave the record pointing at freed storage.
      entry_.Store();
      DeleteData(address, index);
    }
  }

  if (!everything)
    return;

  // Remove all traces of this entry.
  backend_->RemoveEntry(this);

  // Note that at this point node_ and entry_ are just two blocks of data, and
  // even if they reference each other, nobody should be referencing them.

  Addr address(entry_.Data()->long_key);
  DeleteData(address, kKeyFileIndex);
  backend_->ModifyStorageSize(entry_.Data()->key_len, 0);

  backend_->DeleteBlock(entry_.address(), true);
  entry_.Discard();

  if (!LeaveRankingsBehind()) {
    backend_->DeleteBlock(node_.address(), true);
    node_.Discard();
  }
}
545
// Returns the address of the next entry on this hash-collision chain.
CacheAddr EntryImpl::GetNextAddress() {
  return entry_.Data()->next;
}
549
550void EntryImpl::SetNextAddress(Addr address) {
[email protected]ceb61da32011-01-25 23:52:02551 DCHECK_NE(address.value(), entry_.address().value());
[email protected]bdad42a42008-07-31 21:28:48552 entry_.Data()->next = address.value();
553 bool success = entry_.Store();
554 DCHECK(success);
555}
556
557bool EntryImpl::LoadNodeAddress() {
558 Addr address(entry_.Data()->rankings_node);
559 if (!node_.LazyInit(backend_->File(address), address))
560 return false;
561 return node_.Load();
562}
563
[email protected]c4c32fd2009-07-07 19:47:08564bool EntryImpl::Update() {
565 DCHECK(node_.HasData());
[email protected]bdad42a42008-07-31 21:28:48566
[email protected]67b09ec2010-08-27 17:49:42567 if (read_only_)
568 return true;
569
[email protected]c4c32fd2009-07-07 19:47:08570 RankingsNode* rankings = node_.Data();
[email protected]66f16b12009-07-30 18:51:41571 if (!rankings->dirty) {
[email protected]c4c32fd2009-07-07 19:47:08572 rankings->dirty = backend_->GetCurrentEntryId();
[email protected]c4c32fd2009-07-07 19:47:08573 if (!node_.Store())
574 return false;
[email protected]bdad42a42008-07-31 21:28:48575 }
[email protected]c4c32fd2009-07-07 19:47:08576 return true;
[email protected]bdad42a42008-07-31 21:28:48577}
578
avid0181f32015-12-10 19:41:47579void EntryImpl::SetDirtyFlag(int32_t current_id) {
[email protected]bdad42a42008-07-31 21:28:48580 DCHECK(node_.HasData());
[email protected]5f274d22011-02-23 00:55:38581 if (node_.Data()->dirty && current_id != node_.Data()->dirty)
582 dirty_ = true;
[email protected]e47737e22011-09-29 18:59:03583
584 if (!current_id)
585 dirty_ = true;
[email protected]bdad42a42008-07-31 21:28:48586}
587
// Stamps the rankings node with |new_id| as its dirty marker and persists it.
void EntryImpl::SetPointerForInvalidEntry(int32_t new_id) {
  node_.Data()->dirty = new_id;
  node_.Store();
}
592
// Returns true if the rankings node no longer points back at an entry, in
// which case deletion should not touch it.
bool EntryImpl::LeaveRankingsBehind() {
  return !node_.Data()->contents;
}
596
// This only includes checks that relate to the first block of the entry (the
// first 256 bytes), and values that should be set from the entry creation.
// Basically, even if there is something wrong with this entry, we want to see
// if it is possible to load the rankings node and delete them together.
bool EntryImpl::SanityCheck() {
  if (!entry_.VerifyHash())
    return false;

  EntryStore* stored = entry_.Data();
  if (!stored->rankings_node || stored->key_len <= 0)
    return false;

  if (stored->reuse_count < 0 || stored->refetch_count < 0)
    return false;

  Addr rankings_addr(stored->rankings_node);
  if (!rankings_addr.SanityCheckForRankings())
    return false;

  Addr next_addr(stored->next);
  if (next_addr.is_initialized() && !next_addr.SanityCheckForEntry()) {
    STRESS_NOTREACHED();
    return false;
  }
  STRESS_DCHECK(next_addr.value() != entry_.address().value());

  if (stored->state > ENTRY_DOOMED || stored->state < ENTRY_NORMAL)
    return false;

  // long_key must be set if and only if the key doesn't fit inline.
  Addr key_addr(stored->long_key);
  if ((stored->key_len <= kMaxInternalKeyLength && key_addr.is_initialized()) ||
      (stored->key_len > kMaxInternalKeyLength && !key_addr.is_initialized()))
    return false;

  if (!key_addr.SanityCheck())
    return false;

  // External keys below one block must live in a block file, larger ones in
  // a separate file.
  if (key_addr.is_initialized() &&
      ((stored->key_len < kMaxBlockSize && key_addr.is_separate_file()) ||
       (stored->key_len >= kMaxBlockSize && key_addr.is_block_file())))
    return false;

  // The entry record itself must occupy exactly the block count implied by
  // the key length.
  int num_blocks = NumBlocksForEntry(stored->key_len);
  if (entry_.address().num_blocks() != num_blocks)
    return false;

  return true;
}
645
// Deeper validation than SanityCheck(): verifies the key's NUL terminator
// and hash, plus the consistency of every data stream's size/address pair.
bool EntryImpl::DataSanityCheck() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  // The key must be NULL terminated. Note the only caller of this is
  // BackendImpl::NewEntry, which checks EntryImpl::SanityCheck() first. That
  // ensures (among other things) that `key_addr.is_initialized()` reflects
  // whether the key is external or inside `stored->key` accurately; and in
  // case of internal key 0 <= key_len <= kMaxInternalKeyLength.
  if (!key_addr.is_initialized() &&
      InternalKeySpan().at(static_cast<size_t>(stored->key_len)) != '\0') {
    return false;
  }

  if (stored->hash != base::PersistentHash(GetKey()))
    return false;

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_size < 0)
      return false;
    // An empty stream must not have storage allocated.
    if (!data_size && data_addr.is_initialized())
      return false;
    if (!data_addr.SanityCheck())
      return false;
    if (!data_size)
      continue;
    // Small streams belong in block files, large ones in separate files.
    if (data_size <= kMaxBlockSize && data_addr.is_separate_file())
      return false;
    if (data_size > kMaxBlockSize && data_addr.is_block_file())
      return false;
  }
  return true;
}
681
// Repairs an entry that passed SanityCheck() but failed DataSanityCheck()
// just enough for it to be deleted safely: restores the key's NUL
// terminator, drops data addresses that can't be trusted, and clamps
// negative sizes. The patched record is persisted.
void EntryImpl::FixForDelete() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  // Note: this passed `SanityCheck()` which is sufficient for `stored->key` to
  // be the right size for `key_len` if `key_addr` is not initialized, and for
  // `key_len` to be in right range. It failed `DataSanityCheck()`, however,
  // so the null termination may be missing.
  if (!key_addr.is_initialized()) {
    InternalKeySpan().at(static_cast<size_t>(stored->key_len)) = '\0';
  }

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_addr.is_initialized()) {
      if ((data_size <= kMaxBlockSize && data_addr.is_separate_file()) ||
          (data_size > kMaxBlockSize && data_addr.is_block_file()) ||
          !data_addr.SanityCheck()) {
        STRESS_NOTREACHED();
        // The address is weird so don't attempt to delete it.
        stored->data_addr[i] = 0;
        // In general, trust the stored size as it should be in sync with the
        // total size tracked by the backend.
      }
    }
    if (data_size < 0)
      stored->data_size[i] = 0;
  }
  entry_.Store();
}
713
// Forwards to the backend's pending-IO counter.
// NOTE(review): unlike DecrementIoCount(), |backend_| is not null-checked
// here — presumably increments never happen after the backend goes away;
// confirm with callers before changing.
void EntryImpl::IncrementIoCount() {
  backend_->IncrementIoCount();
}
717
718void EntryImpl::DecrementIoCount() {
[email protected]11fbca0b2013-06-02 23:37:21719 if (backend_.get())
[email protected]4739f7092012-03-20 23:15:10720 backend_->DecrementIoCount();
721}
722
// Called once the entry exists in the backend; captures the background
// operation queue used by the asynchronous public entry points.
void EntryImpl::OnEntryCreated(BackendImpl* backend) {
  // Just grab a reference to the background queue.
  background_queue_ = backend->GetBackgroundQueue();
}
727
Maks Orlovich31db0fc02025-03-25 13:17:14728void EntryImpl::SetTimes(base::Time last_used) {
729 auto timestamp = last_used.ToInternalValue();
730 auto* node_data = node_.Data();
731 node_data->last_used = timestamp;
732 node_data->no_longer_used_last_modified = timestamp;
[email protected]07c3f842008-12-02 19:14:29733 node_.set_modified();
734}
735
// Creates this entry's NetLog source and emits the entry-creation begin
// event. Must only be called once per entry (guarded by the DCHECK).
void EntryImpl::BeginLogging(net::NetLog* net_log, bool created) {
  DCHECK(!net_log_.net_log());
  net_log_ = net::NetLogWithSource::Make(
      net_log, net::NetLogSourceType::DISK_CACHE_ENTRY);
  // Parameters are built lazily; the lambda only runs if logging is active.
  net_log_.BeginEvent(net::NetLogEventType::DISK_CACHE_ENTRY_IMPL, [&] {
    return CreateNetLogParametersEntryCreationParams(this, created);
  });
}
744
// Accessor for this entry's NetLog source (set up in BeginLogging()).
const net::NetLogWithSource& EntryImpl::net_log() const {
  return net_log_;
}
748
[email protected]b9b4ce0b2011-07-19 18:10:51749// static
750int EntryImpl::NumBlocksForEntry(int key_size) {
751 // The longest key that can be stored using one block.
752 int key1_len =
753 static_cast<int>(sizeof(EntryStore) - offsetof(EntryStore, key));
754
755 if (key_size < key1_len || key_size > kMaxInternalKeyLength)
756 return 1;
757
758 return ((key_size - key1_len) / 256 + 2);
759}
760
761// ------------------------------------------------------------------------
762
[email protected]4b3c95dd2011-01-07 23:02:11763void EntryImpl::Doom() {
[email protected]11fbca0b2013-06-02 23:37:21764 if (background_queue_.get())
[email protected]4739f7092012-03-20 23:15:10765 background_queue_->DoomEntryImpl(this);
[email protected]4b3c95dd2011-01-07 23:02:11766}
767
768void EntryImpl::Close() {
[email protected]11fbca0b2013-06-02 23:37:21769 if (background_queue_.get())
[email protected]4739f7092012-03-20 23:15:10770 background_queue_->CloseEntryImpl(this);
[email protected]4b3c95dd2011-01-07 23:02:11771}
772
// Returns the entry's key. Short keys are read straight out of the entry
// record; long keys are read (once) from their backing file and cached in
// |key_|. Returns an empty string if the backing file is missing or looks
// corrupt.
std::string EntryImpl::GetKey() const {
  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  int key_len = entry->Data()->key_len;
  // Internal key: stored inline in the entry record.
  if (key_len <= kMaxInternalKeyLength)
    return std::string(base::as_string_view(
        InternalKeySpan().first(static_cast<size_t>(key_len))));

  // We keep a copy of the key so that we can always return it, even if the
  // backend is disabled.
  if (!key_.empty())
    return key_;

  Addr address(entry->Data()->long_key);
  DCHECK(address.is_initialized());
  size_t offset = 0;
  // Block-file storage needs the in-file offset of this block's payload.
  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  static_assert(kNumStreams == kKeyFileIndex, "invalid key index");
  File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
                                                                kKeyFileIndex);
  if (!key_file)
    return std::string();

  // We store a trailing \0 on disk.
  if (!offset && key_file->GetLength() != static_cast<size_t>(key_len + 1)) {
    return std::string();
  }

  // Do not attempt read up to the expected on-disk '\0' --- which would be
  // |key_len + 1| bytes total --- as if due to a corrupt file it isn't |key_|
  // would get its internal nul messed up.
  key_.resize(key_len);
  if (!key_file->Read(base::as_writable_byte_span(key_), offset)) {
    key_.clear();
  }
  DCHECK_LE(strlen(key_.data()), static_cast<size_t>(key_len));
  return key_;
}
812
// Returns the last-used time recorded in the rankings node.
// The const_cast is needed because CacheRankingsBlock::Data() is non-const.
Time EntryImpl::GetLastUsed() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_used);
}
817
avid0181f32015-12-10 19:41:47818int32_t EntryImpl::GetDataSize(int index) const {
[email protected]4b3c95dd2011-01-07 23:02:11819 if (index < 0 || index >= kNumStreams)
820 return 0;
821
822 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
823 return entry->Data()->data_size[index];
824}
825
// Public read entry point. A null |callback| selects the synchronous path;
// otherwise arguments are validated here and the work is posted to the
// background queue, returning ERR_IO_PENDING.
int EntryImpl::ReadData(int index,
                        int offset,
                        IOBuffer* buf,
                        int buf_len,
                        CompletionOnceCallback callback) {
  // Synchronous path: run the implementation directly.
  if (callback.is_null())
    return ReadDataImpl(index, offset, buf, buf_len, std::move(callback));

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  // Reads at/after EOF or of zero length succeed trivially with 0 bytes.
  if (offset >= entry_size || !buf_len) {
    return 0;
  }

  if (offset < 0 || buf_len < 0) {
    return net::ERR_INVALID_ARGUMENT;
  }

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadData(this, index, offset, buf, buf_len,
                              std::move(callback));
  return net::ERR_IO_PENDING;
}
854
// Public write entry point. A null |callback| selects the synchronous path;
// otherwise arguments are validated here and the work is posted to the
// background queue, returning ERR_IO_PENDING.
int EntryImpl::WriteData(int index,
                         int offset,
                         IOBuffer* buf,
                         int buf_len,
                         CompletionOnceCallback callback,
                         bool truncate) {
  // Synchronous path: run the implementation directly.
  if (callback.is_null()) {
    return WriteDataImpl(index, offset, buf, buf_len, std::move(callback),
                         truncate);
  }

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  // A null buffer is only acceptable for zero-length writes.
  if (!buf && buf_len != 0) {
    return net::ERR_INVALID_ARGUMENT;
  }

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->WriteData(this, index, offset, buf, buf_len, truncate,
                               std::move(callback));
  return net::ERR_IO_PENDING;
}
884
avid0181f32015-12-10 19:41:47885int EntryImpl::ReadSparseData(int64_t offset,
886 IOBuffer* buf,
887 int buf_len,
Maks Orlovich4acf0b582018-06-07 11:39:44888 CompletionOnceCallback callback) {
[email protected]2a65aceb82011-12-19 20:59:27889 if (callback.is_null())
Maks Orlovich4acf0b582018-06-07 11:39:44890 return ReadSparseDataImpl(offset, buf, buf_len, std::move(callback));
[email protected]4b3c95dd2011-01-07 23:02:11891
[email protected]11fbca0b2013-06-02 23:37:21892 if (!background_queue_.get())
[email protected]4739f7092012-03-20 23:15:10893 return net::ERR_UNEXPECTED;
894
Maks Orlovich4acf0b582018-06-07 11:39:44895 background_queue_->ReadSparseData(this, offset, buf, buf_len,
896 std::move(callback));
[email protected]4b3c95dd2011-01-07 23:02:11897 return net::ERR_IO_PENDING;
898}
899
avid0181f32015-12-10 19:41:47900int EntryImpl::WriteSparseData(int64_t offset,
901 IOBuffer* buf,
902 int buf_len,
Maks Orlovich4acf0b582018-06-07 11:39:44903 CompletionOnceCallback callback) {
[email protected]2a65aceb82011-12-19 20:59:27904 if (callback.is_null())
Maks Orlovich4acf0b582018-06-07 11:39:44905 return WriteSparseDataImpl(offset, buf, buf_len, std::move(callback));
[email protected]4b3c95dd2011-01-07 23:02:11906
[email protected]11fbca0b2013-06-02 23:37:21907 if (!background_queue_.get())
[email protected]4739f7092012-03-20 23:15:10908 return net::ERR_UNEXPECTED;
909
Maks Orlovich4acf0b582018-06-07 11:39:44910 background_queue_->WriteSparseData(this, offset, buf, buf_len,
911 std::move(callback));
[email protected]4b3c95dd2011-01-07 23:02:11912 return net::ERR_IO_PENDING;
913}
914
Maks Orlovich04cd1ad2021-07-02 17:32:24915RangeResult EntryImpl::GetAvailableRange(int64_t offset,
916 int len,
917 RangeResultCallback callback) {
[email protected]11fbca0b2013-06-02 23:37:21918 if (!background_queue_.get())
Maks Orlovich04cd1ad2021-07-02 17:32:24919 return RangeResult(net::ERR_UNEXPECTED);
[email protected]4739f7092012-03-20 23:15:10920
Maks Orlovich04cd1ad2021-07-02 17:32:24921 background_queue_->GetAvailableRange(this, offset, len, std::move(callback));
922 return RangeResult(net::ERR_IO_PENDING);
[email protected]4b3c95dd2011-01-07 23:02:11923}
924
925bool EntryImpl::CouldBeSparse() const {
926 if (sparse_.get())
927 return true;
928
Lei Zhange00db752021-04-17 00:48:46929 auto sparse = std::make_unique<SparseControl>(const_cast<EntryImpl*>(this));
[email protected]4b3c95dd2011-01-07 23:02:11930 return sparse->CouldBeSparse();
931}
932
933void EntryImpl::CancelSparseIO() {
[email protected]11fbca0b2013-06-02 23:37:21934 if (background_queue_.get())
[email protected]4739f7092012-03-20 23:15:10935 background_queue_->CancelSparseIO(this);
[email protected]4b3c95dd2011-01-07 23:02:11936}
937
Victor Costan45c36ac2018-10-08 07:31:52938net::Error EntryImpl::ReadyForSparseIO(CompletionOnceCallback callback) {
[email protected]4b3c95dd2011-01-07 23:02:11939 if (!sparse_.get())
940 return net::OK;
941
[email protected]11fbca0b2013-06-02 23:37:21942 if (!background_queue_.get())
[email protected]4739f7092012-03-20 23:15:10943 return net::ERR_UNEXPECTED;
944
Maks Orlovich4acf0b582018-06-07 11:39:44945 background_queue_->ReadyForSparseIO(this, std::move(callback));
[email protected]4b3c95dd2011-01-07 23:02:11946 return net::ERR_IO_PENDING;
947}
948
// Test hook: overrides the entry's timestamps via SetTimes().
void EntryImpl::SetLastUsedTimeForTest(base::Time time) {
  SetTimes(time);
}
952
// When an entry is deleted from the cache, we clean up all the data associated
// with it for two reasons: to simplify the reuse of the block (we know that any
// unused block is filled with zeros), and to simplify the handling of write /
// read partial information from an entry (don't have to worry about returning
// data related to a previous cache entry because the range was not fully
// written before).
EntryImpl::~EntryImpl() {
  // No backend: nothing can be persisted, so just drop the modified flags
  // and bail.
  if (!backend_.get()) {
    entry_.clear_modified();
    node_.clear_modified();
    return;
  }

  // Save the sparse info to disk. This will generate IO for this entry and
  // maybe for a child entry, so it is important to do it before deleting this
  // entry.
  sparse_.reset();

  // Remove this entry from the list of open entries.
  backend_->OnEntryDestroyBegin(entry_.address());

  if (doomed_) {
    DeleteEntryData(true);
  } else {
#if defined(NET_BUILD_STRESS_CACHE)
    SanityCheck();
#endif
    net_log_.AddEvent(net::NetLogEventType::ENTRY_CLOSE);
    bool ret = true;
    for (int index = 0; index < kNumStreams; index++) {
      // Flush any buffered user data that never reached disk.
      if (user_buffers_[index].get()) {
        ret = Flush(index, 0);
        if (!ret)
          LOG(ERROR) << "Failed to save user data";
      }
      // Report any size delta the backend hasn't been told about yet.
      if (unreported_size_[index]) {
        backend_->ModifyStorageSize(
            entry_.Data()->data_size[index] - unreported_size_[index],
            entry_.Data()->data_size[index]);
      }
    }

    if (!ret) {
      // There was a failure writing the actual data. Mark the entry as dirty.
      int current_id = backend_->GetCurrentEntryId();
      node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
      node_.Store();
    } else if (node_.HasData() && !dirty_ && node_.Data()->dirty) {
      // Everything flushed cleanly; clear the persisted dirty flag.
      node_.Data()->dirty = 0;
      node_.Store();
    }
  }

  net_log_.EndEvent(net::NetLogEventType::DISK_CACHE_ENTRY_IMPL);
  backend_->OnEntryDestroyEnd();
}
1009
1010// ------------------------------------------------------------------------
1011
// Core read implementation: validates arguments, clamps the read to the
// stream size, serves from the in-memory user buffer when possible, and
// otherwise reads from the backing (block or external) file, synchronously
// or asynchronously depending on |callback|.
int EntryImpl::InternalReadData(int index,
                                int offset,
                                IOBuffer* buf,
                                int buf_len,
                                CompletionOnceCallback callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  // Reads at/after EOF or of zero length succeed trivially with 0 bytes.
  if (offset >= entry_size || !buf_len) {
    return 0;
  }

  if (offset < 0 || buf_len < 0) {
    return net::ERR_INVALID_ARGUMENT;
  }

  if (!backend_.get())
    return net::ERR_UNEXPECTED;

  // Clamp the read so it never extends past the stream's end (the overflow
  // check guards offset + buf_len itself).
  int end_offset;
  if (!base::CheckAdd(offset, buf_len).AssignIfValid(&end_offset) ||
      end_offset > entry_size)
    buf_len = entry_size - offset;

  UpdateRank(false);

  backend_->OnEvent(Stats::READ_DATA);
  backend_->OnRead(buf_len);

  Addr address(entry_.Data()->data_addr[index]);
  int eof = address.is_initialized() ? entry_size : 0;
  if (user_buffers_[index].get() &&
      user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
    // Complete the operation locally.
    buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
    return buf_len;
  }

  // Re-read the address; PreRead() may have changed entry state.
  address.set_value(entry_.Data()->data_addr[index]);
  if (!address.is_initialized()) {
    DoomImpl();
    return net::ERR_FAILED;
  }

  File* file = GetBackingFile(address, index);
  if (!file) {
    DoomImpl();
    LOG(ERROR) << "No file for " << std::hex << address.value();
    return net::ERR_FILE_NOT_FOUND;
  }

  // Block-file storage: translate the stream offset into a file offset.
  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  }

  // For async reads, wrap |callback| in a SyncCallback that keeps this entry
  // and |buf| alive until the file I/O completes.
  SyncCallback* io_callback = nullptr;
  bool null_callback = callback.is_null();
  if (!null_callback) {
    io_callback =
        new SyncCallback(base::WrapRefCounted(this), buf, std::move(callback),
                         net::NetLogEventType::ENTRY_READ_DATA);
  }

  bool completed;
  if (!file->Read(buf->first(base::checked_cast<size_t>(buf_len)), file_offset,
                  io_callback, &completed)) {
    if (io_callback)
      io_callback->Discard();
    DoomImpl();
    return net::ERR_CACHE_READ_FAILURE;
  }

  // If the read finished inline the SyncCallback will never fire; discard it.
  if (io_callback && completed)
    io_callback->Discard();

  return (completed || null_callback) ? buf_len : net::ERR_IO_PENDING;
}
1095
// Core write implementation: validates arguments and size limits, prepares
// the storage target (buffer and/or file), updates sizes and stats, then
// writes either into the in-memory user buffer or to the backing file,
// synchronously or asynchronously depending on |callback|.
int EntryImpl::InternalWriteData(int index,
                                 int offset,
                                 IOBuffer* buf,
                                 int buf_len,
                                 CompletionOnceCallback callback,
                                 bool truncate) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_.get())
    return net::ERR_UNEXPECTED;

  int max_file_size = backend_->MaxFileSize();

  // Reject writes that would exceed the maximum file size (overflow-safe).
  int end_offset;
  if (offset > max_file_size || buf_len > max_file_size ||
      !base::CheckAdd(offset, buf_len).AssignIfValid(&end_offset) ||
      end_offset > max_file_size) {
    int size = base::CheckAdd(offset, buf_len)
                   .ValueOrDefault(std::numeric_limits<int32_t>::max());
    backend_->TooMuchStorageRequested(size);
    return net::ERR_FAILED;
  }

  // Read the size at this point (it may change inside prepare).
  int entry_size = entry_.Data()->data_size[index];
  bool extending = entry_size < offset + buf_len;
  // Only treat this as a truncation if the stream actually shrinks.
  truncate = truncate && entry_size > offset + buf_len;
  if (!PrepareTarget(index, offset, buf_len, truncate))
    return net::ERR_FAILED;

  if (extending || truncate)
    UpdateSize(index, entry_size, offset + buf_len);

  UpdateRank(true);

  backend_->OnEvent(Stats::WRITE_DATA);
  backend_->OnWrite(buf_len);

  if (user_buffers_[index].get()) {
    // Complete the operation locally.
    user_buffers_[index]->Write(offset, buf, buf_len);
    return buf_len;
  }

  Addr address(entry_.Data()->data_addr[index]);
  // Truncation to zero leaves no storage behind; nothing more to do.
  if (offset + buf_len == 0) {
    if (truncate) {
      DCHECK(!address.is_initialized());
    }
    return 0;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return net::ERR_FILE_NOT_FOUND;

  // Block-file storage: translate the stream offset into a file offset.
  // External files may instead need their length adjusted up front.
  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  } else if (truncate || (extending && !buf_len)) {
    if (!file->SetLength(offset + buf_len))
      return net::ERR_FAILED;
  }

  if (!buf_len)
    return 0;

  // For async writes, wrap |callback| in a SyncCallback that keeps this
  // entry and |buf| alive until the file I/O completes.
  SyncCallback* io_callback = nullptr;
  bool null_callback = callback.is_null();
  if (!null_callback) {
    io_callback = new SyncCallback(this, buf, std::move(callback),
                                   net::NetLogEventType::ENTRY_WRITE_DATA);
  }

  bool completed;
  if (!file->Write(buf->first(base::checked_cast<size_t>(buf_len)), file_offset,
                   io_callback, &completed)) {
    if (io_callback)
      io_callback->Discard();
    return net::ERR_CACHE_WRITE_FAILURE;
  }

  // If the write finished inline the SyncCallback will never fire; discard it.
  if (io_callback && completed)
    io_callback->Discard();

  return (completed || null_callback) ? buf_len : net::ERR_IO_PENDING;
}
1191
[email protected]18995e22009-08-31 17:35:141192// ------------------------------------------------------------------------
1193
[email protected]bdad42a42008-07-31 21:28:481194bool EntryImpl::CreateDataBlock(int index, int size) {
[email protected]55185492009-06-25 17:28:311195 DCHECK(index >= 0 && index < kNumStreams);
[email protected]bdad42a42008-07-31 21:28:481196
[email protected]9b2dda12009-03-18 23:22:331197 Addr address(entry_.Data()->data_addr[index]);
[email protected]bdad42a42008-07-31 21:28:481198 if (!CreateBlock(size, &address))
1199 return false;
1200
1201 entry_.Data()->data_addr[index] = address.value();
1202 entry_.Store();
1203 return true;
1204}
1205
// Allocates storage for |size| bytes and fills in |address|: an external
// file for large payloads, otherwise the needed number of blocks in a block
// file. Returns false on failure (or if the backend is gone).
bool EntryImpl::CreateBlock(int size, Addr* address) {
  DCHECK(!address->is_initialized());
  if (!backend_.get())
    return false;

  FileType file_type = Addr::RequiredFileType(size);
  if (EXTERNAL == file_type) {
    // Large payloads get their own file, capped at the backend's max size.
    if (size > backend_->MaxFileSize())
      return false;
    if (!backend_->CreateExternalFile(address))
      return false;
  } else {
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!backend_->CreateBlock(file_type, num_blocks, address))
      return false;
  }
  return true;
}
1225
// Note that this method may end up modifying a block file so upon return the
// involved block will be free, and could be reused for something else. If there
// is a crash after that point (and maybe before returning to the caller), the
// entry will be left dirty... and at some point it will be discarded; it is
// important that the entry doesn't keep a reference to this address, or we'll
// end up deleting the contents of |address| once again.
void EntryImpl::DeleteData(Addr address, int index) {
  DCHECK(backend_.get());
  if (!address.is_initialized())
    return;
  if (address.is_separate_file()) {
    // External payload: remove its dedicated file from disk.
    int failure = !base::DeleteFile(backend_->GetFileName(address));
    if (failure) {
      LOG(ERROR) << "Failed to delete " <<
          backend_->GetFileName(address).value() << " from the cache.";
    }
    // Drop our cached handle to the (now deleted) file.
    if (files_[index].get())
      files_[index] = nullptr;  // Releases the object.
  } else {
    // Block payload: return the blocks to the backend.
    backend_->DeleteBlock(address, true);
  }
}
1248
// Updates this entry's position/timestamps for eviction purposes. Live
// entries delegate to the backend's ranking machinery; doomed entries are no
// longer in the ranking lists, so only their local timestamps are stamped.
void EntryImpl::UpdateRank(bool modified) {
  if (!backend_.get())
    return;

  if (!doomed_) {
    // Everything is handled by the backend.
    backend_->UpdateRank(this, modified);
    return;
  }

  Time current = Time::Now();
  auto timestamp = current.ToInternalValue();
  auto* node_data = node_.Data();
  node_data->last_used = timestamp;
  // NOTE(review): unlike SetTimes(), this does not call node_.set_modified();
  // presumably intentional for doomed entries — confirm before changing.
  node_data->no_longer_used_last_modified = timestamp;
}
1265
1266File* EntryImpl::GetBackingFile(Addr address, int index) {
[email protected]11fbca0b2013-06-02 23:37:211267 if (!backend_.get())
Raul Tambre94493c652019-03-11 17:18:351268 return nullptr;
[email protected]4739f7092012-03-20 23:15:101269
[email protected]bdad42a42008-07-31 21:28:481270 File* file;
1271 if (address.is_separate_file())
1272 file = GetExternalFile(address, index);
1273 else
1274 file = backend_->File(address);
1275 return file;
1276}
1277
// Returns (opening and caching on first use) the external file for
// |address|. May return nullptr if the file fails to initialize.
File* EntryImpl::GetExternalFile(Addr address, int index) {
  DCHECK(index >= 0 && index <= kKeyFileIndex);
  if (!files_[index].get()) {
    // For a key file, use mixed mode IO.
    auto file = base::MakeRefCounted<File>(kKeyFileIndex == index);
    // Only cache the handle if initialization succeeded.
    if (file->Init(backend_->GetFileName(address)))
      files_[index].swap(file);
  }
  return files_[index].get();
}
1288
// We keep a memory buffer for everything that ends up stored on a block file
// (because we don't know yet the final data size), and for some of the data
// that end up on external files. This function will initialize that memory
// buffer and / or the files needed to store the data.
//
// In general, a buffer may overlap data already stored on disk, and in that
// case, the contents of the buffer are the most accurate. It may also extend
// the file, but we don't want to read from disk just to keep the buffer up to
// date. This means that as soon as there is a chance to get confused about what
// is the most recent version of some part of a file, we'll flush the buffer and
// reuse it for the new data. Keep in mind that the normal use pattern is quite
// simple (write sequentially from the beginning), so we optimize for handling
// that case.
bool EntryImpl::PrepareTarget(int index, int offset, int buf_len,
                              bool truncate) {
  // Shrinking writes take a dedicated path.
  if (truncate)
    return HandleTruncation(index, offset, buf_len);

  // Nothing to write and nothing to seek past: no preparation needed.
  if (!offset && !buf_len)
    return true;

  Addr address(entry_.Data()->data_addr[index]);
  if (address.is_initialized()) {
    // Block-file data is always staged through the local buffer.
    if (address.is_block_file() && !MoveToLocalBuffer(index))
      return false;

    if (!user_buffers_[index].get() && offset < kMaxBlockSize) {
      // We are about to create a buffer for the first 16KB, make sure that we
      // preserve existing data.
      if (!CopyToLocalBuffer(index))
        return false;
    }
  }

  // Lazily create the user buffer if the steps above didn't.
  if (!user_buffers_[index].get())
    user_buffers_[index] = std::make_unique<UserBuffer>(backend_.get());

  return PrepareBuffer(index, offset, buf_len);
}
1328
// We get to this function with some data already stored. If there is a
// truncation that results on data stored internally, we'll explicitly
// handle the case here.
bool EntryImpl::HandleTruncation(int index, int offset, int buf_len) {
  Addr address(entry_.Data()->data_addr[index]);

  int current_size = entry_.Data()->data_size[index];
  int new_size = offset + buf_len;

  // This is only called when actually truncating the file, not simply when
  // truncate = true is passed to WriteData(), which could be growing the file.
  DCHECK_LT(new_size, current_size);

  if (new_size == 0) {
    // This is by far the most common scenario.
    // Report the final (zero) size to the backend, clear the stored
    // address/size, persist the record, then release both the backing storage
    // and any in-memory buffer.
    backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
    unreported_size_[index] = 0;
    entry_.Store();
    DeleteData(address, index);

    user_buffers_[index].reset();
    return true;
  }

  // We never postpone truncating a file, if there is one, but we may postpone
  // telling the backend about the size reduction.
  if (user_buffers_[index].get()) {
    DCHECK_GE(current_size, user_buffers_[index]->Start());
    if (!address.is_initialized()) {
      // There is no overlap between the buffer and disk.
      if (new_size > user_buffers_[index]->Start()) {
        // Truncate our buffer.
        DCHECK_LT(new_size, user_buffers_[index]->End());
        user_buffers_[index]->Truncate(new_size);

        if (offset < user_buffers_[index]->Start()) {
          // Request to write before the current buffer's start, so flush it to
          // disk and re-init.
          UpdateSize(index, current_size, new_size);
          if (!Flush(index, 0))
            return false;
          return PrepareBuffer(index, offset, buf_len);
        } else {
          // Can just stick to using the memory buffer.
          return true;
        }
      }

      // Truncated to before the current buffer, so can just discard it.
      user_buffers_[index]->Reset();
      return PrepareBuffer(index, offset, buf_len);
    }

    // There is some overlap or we need to extend the file before the
    // truncation.
    if (offset > user_buffers_[index]->Start())
      user_buffers_[index]->Truncate(new_size);
    UpdateSize(index, current_size, new_size);
    if (!Flush(index, 0))
      return false;
    user_buffers_[index].reset();
  }

  // We have data somewhere, and it is not in a buffer.
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  if (new_size > kMaxBlockSize)
    return true;  // Let the operation go directly to disk.

  // The remaining data fits in a block, so pull it into a local buffer
  // (ImportSeparateFile also records the reduced size).
  return ImportSeparateFile(index, offset + buf_len);
}
1403
[email protected]e1fcf142010-08-23 18:47:251404bool EntryImpl::CopyToLocalBuffer(int index) {
initial.commit586acc5fe2008-07-26 22:42:521405 Addr address(entry_.Data()->data_addr[index]);
1406 DCHECK(!user_buffers_[index].get());
1407 DCHECK(address.is_initialized());
[email protected]e1fcf142010-08-23 18:47:251408
1409 int len = std::min(entry_.Data()->data_size[index], kMaxBlockSize);
Peter Boström8a7540692021-04-05 20:48:201410 user_buffers_[index] = std::make_unique<UserBuffer>(backend_.get());
Raul Tambre94493c652019-03-11 17:18:351411 user_buffers_[index]->Write(len, nullptr, 0);
initial.commit586acc5fe2008-07-26 22:42:521412
1413 File* file = GetBackingFile(address, index);
[email protected]e1fcf142010-08-23 18:47:251414 int offset = 0;
initial.commit586acc5fe2008-07-26 22:42:521415
1416 if (address.is_block_file())
1417 offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
1418
Maks Orlovicha00fe582025-06-12 22:17:201419 if (!file || !file->Read(user_buffers_[index]->as_span().first(
1420 base::checked_cast<size_t>(len)),
1421 offset, nullptr, nullptr)) {
[email protected]e1fcf142010-08-23 18:47:251422 user_buffers_[index].reset();
1423 return false;
1424 }
1425 return true;
1426}
1427
1428bool EntryImpl::MoveToLocalBuffer(int index) {
1429 if (!CopyToLocalBuffer(index))
initial.commit586acc5fe2008-07-26 22:42:521430 return false;
1431
[email protected]e1fcf142010-08-23 18:47:251432 Addr address(entry_.Data()->data_addr[index]);
initial.commit586acc5fe2008-07-26 22:42:521433 entry_.Data()->data_addr[index] = 0;
1434 entry_.Store();
[email protected]65188eb2010-09-16 20:59:291435 DeleteData(address, index);
initial.commit586acc5fe2008-07-26 22:42:521436
1437 // If we lose this entry we'll see it as zero sized.
[email protected]e1fcf142010-08-23 18:47:251438 int len = entry_.Data()->data_size[index];
1439 backend_->ModifyStorageSize(len - unreported_size_[index], 0);
1440 unreported_size_[index] = len;
initial.commit586acc5fe2008-07-26 22:42:521441 return true;
1442}
1443
[email protected]e1fcf142010-08-23 18:47:251444bool EntryImpl::ImportSeparateFile(int index, int new_size) {
1445 if (entry_.Data()->data_size[index] > new_size)
1446 UpdateSize(index, entry_.Data()->data_size[index], new_size);
1447
1448 return MoveToLocalBuffer(index);
1449}
1450
// Makes sure user_buffers_[index] can accept a write of |buf_len| bytes at
// |offset|. May flush the buffer to disk or drop it entirely; returning true
// with user_buffers_[index] reset signals that the write should bypass the
// buffer and go straight to disk (presumably the caller re-checks the buffer
// pointer — TODO confirm against WriteData). Returns false only when a
// required Flush() fails.
bool EntryImpl::PrepareBuffer(int index, int offset, int buf_len) {
  DCHECK(user_buffers_[index].get());
  if ((user_buffers_[index]->End() && offset > user_buffers_[index]->End()) ||
      offset > entry_.Data()->data_size[index]) {
    // We are about to extend the buffer or the file (with zeros), so make sure
    // that we are not overwriting anything.
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized() && address.is_separate_file()) {
      if (!Flush(index, 0))
        return false;
      // There is an actual file already, and we don't want to keep track of
      // its length so we let this operation go straight to disk.
      // The only case when a buffer is allowed to extend the file (as in fill
      // with zeros before the start) is when there is no file yet to extend.
      user_buffers_[index].reset();
      return true;
    }
  }

  if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
    // The buffer cannot hold the requested range as-is; flush what we have,
    // growing the file to at least |offset + buf_len|, and retry.
    if (!Flush(index, offset + buf_len))
      return false;

    // Lets try again.
    if (offset > user_buffers_[index]->End() ||
        !user_buffers_[index]->PreWrite(offset, buf_len)) {
      // We cannot complete the operation with a buffer.
      DCHECK(!user_buffers_[index]->Size());
      DCHECK(!user_buffers_[index]->Start());
      user_buffers_[index].reset();
    }
  }
  return true;
}
1485
// Writes the contents of user_buffers_[index] to the backing storage,
// allocating a data block large enough for at least |min_len| bytes if no
// storage exists yet. Returns false if allocation, file lookup, or the write
// itself fails.
bool EntryImpl::Flush(int index, int min_len) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(user_buffers_[index].get());
  DCHECK(!address.is_initialized() || address.is_separate_file());
  DVLOG(3) << "Flush";

  // Size the backing storage for the larger of the recorded stream size and
  // the caller's requirement.
  int size = std::max(entry_.Data()->data_size[index], min_len);
  if (size && !address.is_initialized() && !CreateDataBlock(index, size))
    return false;

  // A zero recorded size means there is nothing to write out.
  if (!entry_.Data()->data_size[index]) {
    DCHECK(!user_buffers_[index]->Size());
    return true;
  }

  // CreateDataBlock() above may have just assigned an address; re-read it.
  address.set_value(entry_.Data()->data_addr[index]);

  int len = user_buffers_[index]->Size();
  int offset = user_buffers_[index]->Start();
  // An empty buffer starting at zero has nothing to flush.
  if (!len && !offset)
    return true;

  if (address.is_block_file()) {
    // Block-file storage holds the full stream starting at 0; translate to
    // the block's byte position inside the block file.
    DCHECK_EQ(len, entry_.Data()->data_size[index]);
    DCHECK(!offset);
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return false;

  // Null callback/completion arguments: presumably a blocking write — see
  // disk_cache::File::Write.
  if (!file->Write(user_buffers_[index]->as_span().first(
                       base::checked_cast<size_t>(len)),
                   offset, nullptr, nullptr)) {
    return false;
  }
  // The buffer's contents are now on disk; empty it (the entry still tracks
  // the data size).
  user_buffers_[index]->Reset();

  return true;
}
1527
[email protected]e1fcf142010-08-23 18:47:251528void EntryImpl::UpdateSize(int index, int old_size, int new_size) {
1529 if (entry_.Data()->data_size[index] == new_size)
1530 return;
1531
1532 unreported_size_[index] += new_size - old_size;
1533 entry_.Data()->data_size[index] = new_size;
1534 entry_.set_modified();
1535}
1536
[email protected]84d4cee2009-06-18 23:46:581537int EntryImpl::InitSparseData() {
1538 if (sparse_.get())
1539 return net::OK;
1540
[email protected]fb2622f2010-07-13 18:00:561541 // Use a local variable so that sparse_ never goes from 'valid' to NULL.
Tsuyoshi Horof8861cb2022-07-05 23:50:201542 auto sparse = std::make_unique<SparseControl>(this);
[email protected]fb2622f2010-07-13 18:00:561543 int result = sparse->Init();
1544 if (net::OK == result)
1545 sparse_.swap(sparse);
1546
[email protected]84d4cee2009-06-18 23:46:581547 return result;
1548}
1549
avid0181f32015-12-10 19:41:471550void EntryImpl::SetEntryFlags(uint32_t flags) {
[email protected]7d4e3a82009-07-09 22:01:291551 entry_.Data()->flags |= flags;
1552 entry_.set_modified();
1553}
1554
avid0181f32015-12-10 19:41:471555uint32_t EntryImpl::GetEntryFlags() {
[email protected]7d4e3a82009-07-09 22:01:291556 return entry_.Data()->flags;
1557}
1558
// Hands the data of stream |index| to the caller: either copies the
// in-memory buffer into |buffer|, or returns the disk |address| while
// detaching it from this entry (so destroying the entry won't delete the
// block the caller was just given).
void EntryImpl::GetData(int index,
                        base::HeapArray<uint8_t>* buffer,
                        Addr* address) {
  DCHECK(backend_.get());
  if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
      !user_buffers_[index]->Start()) {
    // The data is already in memory, just copy it and we're done.
    int data_len = entry_.Data()->data_size[index];
    if (data_len <= user_buffers_[index]->Size()) {
      DCHECK(!user_buffers_[index]->Start());
      *buffer = base::HeapArray<uint8_t>::Uninit(data_len);
      buffer->as_span().copy_from_nonoverlapping(
          user_buffers_[index]->as_span().first(buffer->size()));
      return;
    }
  }

  // Bad news: we'd have to read the info from disk so instead we'll just tell
  // the caller where to read from.
  *buffer = {};
  address->set_value(entry_.Data()->data_addr[index]);
  if (address->is_initialized()) {
    // Prevent us from deleting the block from the backing store.
    backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
  }
}
1588
// Returns a writable span over the inline key storage inside the entry's
// mapped block(s). Only valid when the key is stored inline, i.e. there is
// no long-key address (enforced by the CHECK below).
base::span<char> EntryImpl::InternalKeySpan() const {
  // const_cast so the returned span refers to mutable storage even though
  // this accessor is const.
  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  Addr key_addr(entry->Data()->long_key);
  CHECK(!key_addr.is_initialized());

  // The inline key area is the tail of the first EntryStore plus the whole
  // of any extra contiguous blocks allocated for this entry.
  int num_blocks = entry_.address().num_blocks();
  size_t max_key_size = sizeof(EntryStore) - offsetof(EntryStore, key);
  if (num_blocks > 1) {
    max_key_size += sizeof(EntryStore) * (num_blocks - 1);
  }

  // Safety: this depends on BackendImpl::CreateEntryImpl allocating the right
  // amount of space using EntryImpl::NumBlocksForEntry, EntryImpl::SanityCheck
  // checking the consistency of fields when opening the entry, and `entry_`
  // mechanics making sure that entry_.address().num_blocks() *
  // sizeof(EntryStore) bytes are mapped.
  return UNSAFE_BUFFERS(base::span(entry->Data()->key, max_key_size));
}
1607
initial.commit586acc5fe2008-07-26 22:42:521608} // namespace disk_cache