Avi Drissman | 6459548 | 2022-09-14 20:52:29 | [diff] [blame] | 1 | // Copyright 2012 The Chromium Authors |
license.bot | bf09a50 | 2008-08-24 00:55:55 | [diff] [blame] | 2 | // Use of this source code is governed by a BSD-style license that can be |
| 3 | // found in the LICENSE file. |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 4 | |
[email protected] | c2c5cfc | 2014-03-03 16:35:28 | [diff] [blame] | 5 | #include "net/disk_cache/blockfile/entry_impl.h" |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 6 | |
Maks Orlovich | a00fe58 | 2025-06-12 22:17:20 | [diff] [blame] | 7 | #include <algorithm> |
avi | d0181f3 | 2015-12-10 19:41:47 | [diff] [blame] | 8 | #include <limits> |
Peter Boström | 8a754069 | 2021-04-05 20:48:20 | [diff] [blame] | 9 | #include <memory> |
avi | d0181f3 | 2015-12-10 19:41:47 | [diff] [blame] | 10 | |
Maks Orlovich | 98c00d32 | 2025-05-30 14:44:44 | [diff] [blame] | 11 | #include "base/compiler_specific.h" |
Piotr Bialecki | 62eb546 | 2024-12-10 21:00:30 | [diff] [blame] | 12 | #include "base/containers/heap_array.h" |
Yutaka Hirano | 238ce65 | 2022-04-19 05:20:40 | [diff] [blame] | 13 | #include "base/files/file_util.h" |
Daniel Cheng | c058199 | 2019-03-29 04:52:56 | [diff] [blame] | 14 | #include "base/hash/hash.h" |
Matthew Denton | a21ca7b | 2019-03-13 19:02:32 | [diff] [blame] | 15 | #include "base/numerics/safe_math.h" |
Maks Orlovich | 955b5a7 | 2025-06-11 21:46:32 | [diff] [blame] | 16 | #include "base/strings/cstring_view.h" |
[email protected] | be528af | 2013-06-11 07:39:48 | [diff] [blame] | 17 | #include "base/strings/string_util.h" |
Daniel Cheng | 17390fd | 2025-06-07 06:38:26 | [diff] [blame] | 18 | #include "base/strings/string_view_util.h" |
Gabriel Charette | d87f10f | 2022-03-31 00:44:22 | [diff] [blame] | 19 | #include "base/time/time.h" |
[email protected] | 74a85ce | 2009-02-12 00:03:19 | [diff] [blame] | 20 | #include "net/base/io_buffer.h" |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 21 | #include "net/base/net_errors.h" |
[email protected] | c2c5cfc | 2014-03-03 16:35:28 | [diff] [blame] | 22 | #include "net/disk_cache/blockfile/backend_impl.h" |
| 23 | #include "net/disk_cache/blockfile/bitmap.h" |
| 24 | #include "net/disk_cache/blockfile/disk_format.h" |
[email protected] | c2c5cfc | 2014-03-03 16:35:28 | [diff] [blame] | 25 | #include "net/disk_cache/blockfile/sparse_control.h" |
[email protected] | a88d601f | 2008-08-15 20:36:21 | [diff] [blame] | 26 | #include "net/disk_cache/cache_util.h" |
[email protected] | 9eb8cdf | 2011-03-17 18:53:02 | [diff] [blame] | 27 | #include "net/disk_cache/net_log_parameters.h" |
mikecirone | f22f981 | 2016-10-04 03:40:19 | [diff] [blame] | 28 | #include "net/log/net_log.h" |
mikecirone | 8b85c43 | 2016-09-08 19:11:00 | [diff] [blame] | 29 | #include "net/log/net_log_event_type.h" |
| 30 | #include "net/log/net_log_source_type.h" |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 31 | |
[email protected] | e1acf6f | 2008-10-27 20:43:33 | [diff] [blame] | 32 | using base::Time; |
[email protected] | b36a7bd | 2010-02-22 23:14:23 | [diff] [blame] | 33 | using base::TimeTicks; |
[email protected] | e1acf6f | 2008-10-27 20:43:33 | [diff] [blame] | 34 | |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 35 | namespace { |
| 36 | |
[email protected] | 62cdf1e | 2008-12-04 23:03:33 | [diff] [blame] | 37 | // Index for the file used to store the key, if any (files_[kKeyFileIndex]). |
| 38 | const int kKeyFileIndex = 3; |
| 39 | |
// This class implements FileIOCallback to buffer the callback from a file IO
// operation from the actual net class. It is heap-allocated and deletes
// itself from OnFileIOComplete() once the I/O finishes (or is discarded).
class SyncCallback: public disk_cache::FileIOCallback {
 public:
  // |end_event_type| is the event type to log on completion. Logs nothing on
  // discard, or when the NetLog is not set to log all events.
  // The constructor bumps the entry's I/O count; OnFileIOComplete() is
  // responsible for decrementing it again.
  SyncCallback(scoped_refptr<disk_cache::EntryImpl> entry,
               net::IOBuffer* buffer,
               net::CompletionOnceCallback callback,
               net::NetLogEventType end_event_type)
      : entry_(std::move(entry)),
        callback_(std::move(callback)),
        buf_(buffer),
        end_event_type_(end_event_type) {
    entry_->IncrementIoCount();
  }

  SyncCallback(const SyncCallback&) = delete;
  SyncCallback& operator=(const SyncCallback&) = delete;

  ~SyncCallback() override = default;

  // Runs |callback_| (if still set), then deletes |this|.
  void OnFileIOComplete(int bytes_copied) override;
  // Drops the callback and buffer without notifying the consumer, then
  // finishes as a 0-byte completion (which deletes |this|).
  void Discard();

 private:
  scoped_refptr<disk_cache::EntryImpl> entry_;
  net::CompletionOnceCallback callback_;
  scoped_refptr<net::IOBuffer> buf_;  // Keeps the I/O target alive.
  const net::NetLogEventType end_event_type_;
};
| 71 | |
| 72 | void SyncCallback::OnFileIOComplete(int bytes_copied) { |
| 73 | entry_->DecrementIoCount(); |
[email protected] | 2a65aceb8 | 2011-12-19 20:59:27 | [diff] [blame] | 74 | if (!callback_.is_null()) { |
eroman | 24bc6a1 | 2015-05-06 19:55:48 | [diff] [blame] | 75 | if (entry_->net_log().IsCapturing()) { |
Eric Roman | 06bd974 | 2019-07-13 15:19:13 | [diff] [blame] | 76 | disk_cache::NetLogReadWriteComplete(entry_->net_log(), end_event_type_, |
| 77 | net::NetLogEventPhase::END, |
| 78 | bytes_copied); |
[email protected] | f6f1bebc | 2011-01-07 03:04:54 | [diff] [blame] | 79 | } |
Raul Tambre | 94493c65 | 2019-03-11 17:18:35 | [diff] [blame] | 80 | buf_ = nullptr; // Release the buffer before invoking the callback. |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 81 | std::move(callback_).Run(bytes_copied); |
[email protected] | 447baad | 2009-08-28 23:05:46 | [diff] [blame] | 82 | } |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 83 | delete this; |
| 84 | } |
| 85 | |
| 86 | void SyncCallback::Discard() { |
[email protected] | 2a65aceb8 | 2011-12-19 20:59:27 | [diff] [blame] | 87 | callback_.Reset(); |
Raul Tambre | 94493c65 | 2019-03-11 17:18:35 | [diff] [blame] | 88 | buf_ = nullptr; |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 89 | OnFileIOComplete(0); |
| 90 | } |
| 91 | |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 92 | const int kMaxBufferSize = 1024 * 1024; // 1 MB. |
| 93 | |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 94 | } // namespace |
| 95 | |
| 96 | namespace disk_cache { |
| 97 | |
// This class handles individual memory buffers that store data before it is
// sent to disk. The buffer can start at any offset, but if we try to write to
// anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to
// zero. The buffer grows up to a size determined by the backend, to keep the
// total memory used under control.
class EntryImpl::UserBuffer {
 public:
  explicit UserBuffer(BackendImpl* backend) : backend_(backend->GetWeakPtr()) {
    buffer_.reserve(kMaxBlockSize);
  }

  UserBuffer(const UserBuffer&) = delete;
  UserBuffer& operator=(const UserBuffer&) = delete;

  ~UserBuffer() {
    // Return any memory above the default reservation to the backend's
    // global accounting, if the backend is still alive.
    if (backend_.get())
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
  }

  // Returns true if we can handle writing |len| bytes to |offset|.
  bool PreWrite(int offset, int len);

  // Truncates the buffer to |offset| bytes.
  void Truncate(int offset);

  // Writes |len| bytes from |buf| at the given |offset|.
  void Write(int offset, IOBuffer* buf, int len);

  // Returns true if we can read |len| bytes from |offset|, given that the
  // actual file has |eof| bytes stored. Note that the number of bytes to read
  // may be modified by this method even though it returns false: that means we
  // should do a smaller read from disk.
  bool PreRead(int eof, int offset, int* len);

  // Read |len| bytes from |buf| at the given |offset|.
  int Read(int offset, IOBuffer* buf, int len);

  // Prepare this buffer for reuse.
  void Reset();

  // Direct view over the buffered bytes (file range [Start(), End())).
  base::span<uint8_t> as_span() { return buffer_; }

  int Size() { return static_cast<int>(buffer_.size()); }
  int Start() { return offset_; }  // File offset of buffer_[0].
  int End() { return offset_ + Size(); }

 private:
  int capacity() { return static_cast<int>(buffer_.capacity()); }
  bool GrowBuffer(int required, int limit);

  base::WeakPtr<BackendImpl> backend_;
  // File offset where the buffered data starts; 0 unless the first write
  // landed past kMaxBlockSize.
  int offset_ = 0;
  std::vector<uint8_t> buffer_;
  // False once the backend denied a growth request; cleared by Reset().
  bool grow_allowed_ = true;
};
| 153 | |
| 154 | bool EntryImpl::UserBuffer::PreWrite(int offset, int len) { |
| 155 | DCHECK_GE(offset, 0); |
| 156 | DCHECK_GE(len, 0); |
| 157 | DCHECK_GE(offset + len, 0); |
| 158 | |
| 159 | // We don't want to write before our current start. |
| 160 | if (offset < offset_) |
| 161 | return false; |
| 162 | |
| 163 | // Lets get the common case out of the way. |
| 164 | if (offset + len <= capacity()) |
| 165 | return true; |
| 166 | |
| 167 | // If we are writing to the first 16K (kMaxBlockSize), we want to keep the |
| 168 | // buffer offset_ at 0. |
| 169 | if (!Size() && offset > kMaxBlockSize) |
| 170 | return GrowBuffer(len, kMaxBufferSize); |
| 171 | |
| 172 | int required = offset - offset_ + len; |
| 173 | return GrowBuffer(required, kMaxBufferSize * 6 / 5); |
| 174 | } |
| 175 | |
| 176 | void EntryImpl::UserBuffer::Truncate(int offset) { |
| 177 | DCHECK_GE(offset, 0); |
| 178 | DCHECK_GE(offset, offset_); |
[email protected] | 42d5be69 | 2010-11-04 17:34:38 | [diff] [blame] | 179 | DVLOG(3) << "Buffer truncate at " << offset << " current " << offset_; |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 180 | |
| 181 | offset -= offset_; |
| 182 | if (Size() >= offset) |
| 183 | buffer_.resize(offset); |
| 184 | } |
| 185 | |
[email protected] | c67287e | 2012-03-23 01:19:55 | [diff] [blame] | 186 | void EntryImpl::UserBuffer::Write(int offset, IOBuffer* buf, int len) { |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 187 | DCHECK_GE(offset, 0); |
| 188 | DCHECK_GE(len, 0); |
| 189 | DCHECK_GE(offset + len, 0); |
Maks Orlovich | 9c1f287cf | 2019-04-08 17:15:00 | [diff] [blame] | 190 | |
| 191 | // 0-length writes that don't extend can just be ignored here, and are safe |
| 192 | // even if they're are before offset_, as truncates are handled elsewhere. |
| 193 | if (len == 0 && offset < End()) |
| 194 | return; |
| 195 | |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 196 | DCHECK_GE(offset, offset_); |
[email protected] | 42d5be69 | 2010-11-04 17:34:38 | [diff] [blame] | 197 | DVLOG(3) << "Buffer write at " << offset << " current " << offset_; |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 198 | |
| 199 | if (!Size() && offset > kMaxBlockSize) |
| 200 | offset_ = offset; |
| 201 | |
| 202 | offset -= offset_; |
| 203 | |
| 204 | if (offset > Size()) |
| 205 | buffer_.resize(offset); |
| 206 | |
| 207 | if (!len) |
| 208 | return; |
| 209 | |
Maks Orlovich | a00fe58 | 2025-06-12 22:17:20 | [diff] [blame] | 210 | base::span<uint8_t> in_buf = buf->first(len); |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 211 | int valid_len = Size() - offset; |
| 212 | int copy_len = std::min(valid_len, len); |
| 213 | if (copy_len) { |
Maks Orlovich | a00fe58 | 2025-06-12 22:17:20 | [diff] [blame] | 214 | size_t sz_offset = base::checked_cast<size_t>(offset); |
| 215 | size_t sz_len = base::checked_cast<size_t>(copy_len); |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 216 | |
Maks Orlovich | a00fe58 | 2025-06-12 22:17:20 | [diff] [blame] | 217 | base::span(buffer_) |
| 218 | .subspan(sz_offset, sz_len) |
| 219 | .copy_from_nonoverlapping(in_buf.take_first(sz_len)); |
| 220 | } |
| 221 | if (in_buf.empty()) { |
| 222 | return; |
| 223 | } |
| 224 | |
| 225 | buffer_.insert(buffer_.end(), in_buf.begin(), in_buf.end()); |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 226 | } |
| 227 | |
| 228 | bool EntryImpl::UserBuffer::PreRead(int eof, int offset, int* len) { |
| 229 | DCHECK_GE(offset, 0); |
| 230 | DCHECK_GT(*len, 0); |
| 231 | |
| 232 | if (offset < offset_) { |
| 233 | // We are reading before this buffer. |
| 234 | if (offset >= eof) |
| 235 | return true; |
| 236 | |
| 237 | // If the read overlaps with the buffer, change its length so that there is |
| 238 | // no overlap. |
| 239 | *len = std::min(*len, offset_ - offset); |
| 240 | *len = std::min(*len, eof - offset); |
| 241 | |
| 242 | // We should read from disk. |
| 243 | return false; |
| 244 | } |
| 245 | |
| 246 | if (!Size()) |
| 247 | return false; |
| 248 | |
| 249 | // See if we can fulfill the first part of the operation. |
| 250 | return (offset - offset_ < Size()); |
| 251 | } |
| 252 | |
[email protected] | c67287e | 2012-03-23 01:19:55 | [diff] [blame] | 253 | int EntryImpl::UserBuffer::Read(int offset, IOBuffer* buf, int len) { |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 254 | DCHECK_GE(offset, 0); |
| 255 | DCHECK_GT(len, 0); |
| 256 | DCHECK(Size() || offset < offset_); |
| 257 | |
Maks Orlovich | a00fe58 | 2025-06-12 22:17:20 | [diff] [blame] | 258 | base::span<uint8_t> dest = buf->span(); |
| 259 | |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 260 | int clean_bytes = 0; |
| 261 | if (offset < offset_) { |
| 262 | // We don't have a file so lets fill the first part with 0. |
| 263 | clean_bytes = std::min(offset_ - offset, len); |
Maks Orlovich | a00fe58 | 2025-06-12 22:17:20 | [diff] [blame] | 264 | std::ranges::fill(dest.take_first(base::checked_cast<size_t>(clean_bytes)), |
| 265 | 0); |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 266 | if (len == clean_bytes) |
| 267 | return len; |
| 268 | offset = offset_; |
| 269 | len -= clean_bytes; |
| 270 | } |
| 271 | |
| 272 | int start = offset - offset_; |
| 273 | int available = Size() - start; |
| 274 | DCHECK_GE(start, 0); |
| 275 | DCHECK_GE(available, 0); |
| 276 | len = std::min(len, available); |
Maks Orlovich | a00fe58 | 2025-06-12 22:17:20 | [diff] [blame] | 277 | size_t sz_len = base::checked_cast<size_t>(len); |
| 278 | size_t sz_start = base::checked_cast<size_t>(start); |
| 279 | dest.first(sz_len).copy_from_nonoverlapping( |
| 280 | base::span(buffer_).subspan(sz_start, sz_len)); |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 281 | return len + clean_bytes; |
| 282 | } |
| 283 | |
| 284 | void EntryImpl::UserBuffer::Reset() { |
| 285 | if (!grow_allowed_) { |
[email protected] | 11fbca0b | 2013-06-02 23:37:21 | [diff] [blame] | 286 | if (backend_.get()) |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 287 | backend_->BufferDeleted(capacity() - kMaxBlockSize); |
| 288 | grow_allowed_ = true; |
Maks Orlovich | a00fe58 | 2025-06-12 22:17:20 | [diff] [blame] | 289 | std::vector<uint8_t> tmp; |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 290 | buffer_.swap(tmp); |
[email protected] | 9cb851f | 2010-09-29 17:32:40 | [diff] [blame] | 291 | buffer_.reserve(kMaxBlockSize); |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 292 | } |
| 293 | offset_ = 0; |
| 294 | buffer_.clear(); |
| 295 | } |
| 296 | |
| 297 | bool EntryImpl::UserBuffer::GrowBuffer(int required, int limit) { |
| 298 | DCHECK_GE(required, 0); |
| 299 | int current_size = capacity(); |
| 300 | if (required <= current_size) |
| 301 | return true; |
| 302 | |
| 303 | if (required > limit) |
| 304 | return false; |
| 305 | |
[email protected] | 11fbca0b | 2013-06-02 23:37:21 | [diff] [blame] | 306 | if (!backend_.get()) |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 307 | return false; |
| 308 | |
| 309 | int to_add = std::max(required - current_size, kMaxBlockSize * 4); |
| 310 | to_add = std::max(current_size, to_add); |
| 311 | required = std::min(current_size + to_add, limit); |
| 312 | |
| 313 | grow_allowed_ = backend_->IsAllocAllowed(current_size, required); |
| 314 | if (!grow_allowed_) |
| 315 | return false; |
| 316 | |
[email protected] | 42d5be69 | 2010-11-04 17:34:38 | [diff] [blame] | 317 | DVLOG(3) << "Buffer grow to " << required; |
| 318 | |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 319 | buffer_.reserve(required); |
| 320 | return true; |
| 321 | } |
| 322 | |
| 323 | // ------------------------------------------------------------------------ |
| 324 | |
// Wraps the cache entry stored at |address|. The entry block is mapped
// immediately; the rankings node (node_) stays uninitialized until needed.
// |read_only| marks entries opened through an iterator, which must not be
// modified.
EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only)
    : entry_(nullptr, Addr(0)),
      node_(nullptr, Addr(0)),
      backend_(backend->GetWeakPtr()),
      read_only_(read_only) {
  entry_.LazyInit(backend->File(address), address);
}
| 332 | |
[email protected] | 2235b22 | 2010-07-15 21:03:43 | [diff] [blame] | 333 | void EntryImpl::DoomImpl() { |
[email protected] | 11fbca0b | 2013-06-02 23:37:21 | [diff] [blame] | 334 | if (doomed_ || !backend_.get()) |
[email protected] | 2235b22 | 2010-07-15 21:03:43 | [diff] [blame] | 335 | return; |
| 336 | |
| 337 | SetPointerForInvalidEntry(backend_->GetCurrentEntryId()); |
| 338 | backend_->InternalDoomEntry(this); |
| 339 | } |
| 340 | |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 341 | int EntryImpl::ReadDataImpl(int index, |
| 342 | int offset, |
| 343 | IOBuffer* buf, |
| 344 | int buf_len, |
| 345 | CompletionOnceCallback callback) { |
eroman | 24bc6a1 | 2015-05-06 19:55:48 | [diff] [blame] | 346 | if (net_log_.IsCapturing()) { |
Eric Roman | 06bd974 | 2019-07-13 15:19:13 | [diff] [blame] | 347 | NetLogReadWriteData(net_log_, net::NetLogEventType::ENTRY_READ_DATA, |
| 348 | net::NetLogEventPhase::BEGIN, index, offset, buf_len, |
| 349 | false); |
[email protected] | f6f1bebc | 2011-01-07 03:04:54 | [diff] [blame] | 350 | } |
| 351 | |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 352 | int result = |
| 353 | InternalReadData(index, offset, buf, buf_len, std::move(callback)); |
[email protected] | f6f1bebc | 2011-01-07 03:04:54 | [diff] [blame] | 354 | |
eroman | 24bc6a1 | 2015-05-06 19:55:48 | [diff] [blame] | 355 | if (result != net::ERR_IO_PENDING && net_log_.IsCapturing()) { |
Eric Roman | 06bd974 | 2019-07-13 15:19:13 | [diff] [blame] | 356 | NetLogReadWriteComplete(net_log_, net::NetLogEventType::ENTRY_READ_DATA, |
| 357 | net::NetLogEventPhase::END, result); |
[email protected] | f6f1bebc | 2011-01-07 03:04:54 | [diff] [blame] | 358 | } |
| 359 | return result; |
| 360 | } |
| 361 | |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 362 | int EntryImpl::WriteDataImpl(int index, |
| 363 | int offset, |
| 364 | IOBuffer* buf, |
| 365 | int buf_len, |
| 366 | CompletionOnceCallback callback, |
John Abd-El-Malek | 0dcbbcfd | 2023-03-21 00:27:50 | [diff] [blame] | 367 | bool truncate) { |
eroman | 24bc6a1 | 2015-05-06 19:55:48 | [diff] [blame] | 368 | if (net_log_.IsCapturing()) { |
Eric Roman | 06bd974 | 2019-07-13 15:19:13 | [diff] [blame] | 369 | NetLogReadWriteData(net_log_, net::NetLogEventType::ENTRY_WRITE_DATA, |
| 370 | net::NetLogEventPhase::BEGIN, index, offset, buf_len, |
| 371 | truncate); |
[email protected] | f6f1bebc | 2011-01-07 03:04:54 | [diff] [blame] | 372 | } |
| 373 | |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 374 | int result = InternalWriteData(index, offset, buf, buf_len, |
John Abd-El-Malek | 0dcbbcfd | 2023-03-21 00:27:50 | [diff] [blame] | 375 | std::move(callback), truncate); |
[email protected] | f6f1bebc | 2011-01-07 03:04:54 | [diff] [blame] | 376 | |
eroman | 24bc6a1 | 2015-05-06 19:55:48 | [diff] [blame] | 377 | if (result != net::ERR_IO_PENDING && net_log_.IsCapturing()) { |
Eric Roman | 06bd974 | 2019-07-13 15:19:13 | [diff] [blame] | 378 | NetLogReadWriteComplete(net_log_, net::NetLogEventType::ENTRY_WRITE_DATA, |
| 379 | net::NetLogEventPhase::END, result); |
[email protected] | f6f1bebc | 2011-01-07 03:04:54 | [diff] [blame] | 380 | } |
| 381 | return result; |
| 382 | } |
| 383 | |
avi | d0181f3 | 2015-12-10 19:41:47 | [diff] [blame] | 384 | int EntryImpl::ReadSparseDataImpl(int64_t offset, |
| 385 | IOBuffer* buf, |
| 386 | int buf_len, |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 387 | CompletionOnceCallback callback) { |
[email protected] | 67b09ec | 2010-08-27 17:49:42 | [diff] [blame] | 388 | DCHECK(node_.Data()->dirty || read_only_); |
[email protected] | 84d4cee | 2009-06-18 23:46:58 | [diff] [blame] | 389 | int result = InitSparseData(); |
| 390 | if (net::OK != result) |
| 391 | return result; |
| 392 | |
[email protected] | 5518549 | 2009-06-25 17:28:31 | [diff] [blame] | 393 | result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len, |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 394 | std::move(callback)); |
[email protected] | 5518549 | 2009-06-25 17:28:31 | [diff] [blame] | 395 | return result; |
[email protected] | a2068a61 | 2009-06-04 21:43:49 | [diff] [blame] | 396 | } |
| 397 | |
avi | d0181f3 | 2015-12-10 19:41:47 | [diff] [blame] | 398 | int EntryImpl::WriteSparseDataImpl(int64_t offset, |
| 399 | IOBuffer* buf, |
| 400 | int buf_len, |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 401 | CompletionOnceCallback callback) { |
[email protected] | 67b09ec | 2010-08-27 17:49:42 | [diff] [blame] | 402 | DCHECK(node_.Data()->dirty || read_only_); |
[email protected] | 84d4cee | 2009-06-18 23:46:58 | [diff] [blame] | 403 | int result = InitSparseData(); |
| 404 | if (net::OK != result) |
| 405 | return result; |
| 406 | |
[email protected] | 5518549 | 2009-06-25 17:28:31 | [diff] [blame] | 407 | result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf, |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 408 | buf_len, std::move(callback)); |
[email protected] | 5518549 | 2009-06-25 17:28:31 | [diff] [blame] | 409 | return result; |
[email protected] | a2068a61 | 2009-06-04 21:43:49 | [diff] [blame] | 410 | } |
| 411 | |
Maks Orlovich | 04cd1ad | 2021-07-02 17:32:24 | [diff] [blame] | 412 | RangeResult EntryImpl::GetAvailableRangeImpl(int64_t offset, int len) { |
[email protected] | 2a5ee41 | 2010-07-17 01:03:10 | [diff] [blame] | 413 | int result = InitSparseData(); |
| 414 | if (net::OK != result) |
Maks Orlovich | 04cd1ad | 2021-07-02 17:32:24 | [diff] [blame] | 415 | return RangeResult(static_cast<net::Error>(result)); |
[email protected] | 2a5ee41 | 2010-07-17 01:03:10 | [diff] [blame] | 416 | |
Maks Orlovich | 04cd1ad | 2021-07-02 17:32:24 | [diff] [blame] | 417 | return sparse_->GetAvailableRange(offset, len); |
[email protected] | 2a5ee41 | 2010-07-17 01:03:10 | [diff] [blame] | 418 | } |
| 419 | |
[email protected] | fb2622f | 2010-07-13 18:00:56 | [diff] [blame] | 420 | void EntryImpl::CancelSparseIOImpl() { |
[email protected] | 06e62ba | 2009-10-08 23:07:39 | [diff] [blame] | 421 | if (!sparse_.get()) |
| 422 | return; |
| 423 | |
| 424 | sparse_->CancelIO(); |
| 425 | } |
| 426 | |
// Forwards to SparseControl to find out whether sparse I/O can proceed;
// |callback| fires once any pending cancellation completes. Requires that
// sparse support was already initialized.
int EntryImpl::ReadyForSparseIOImpl(CompletionOnceCallback callback) {
  DCHECK(sparse_.get());
  return sparse_->ReadyToUse(std::move(callback));
}
| 431 | |
// Returns the hash of this entry's key as stored in the on-disk entry block.
uint32_t EntryImpl::GetHash() {
  return entry_.Data()->hash;
}
| 435 | |
// Initializes the on-disk records for a brand new entry: clears the entry
// block, links it to the rankings node at |node_address|, and stores |key|
// either inline (short keys) or in a dedicated key file/block (long keys).
// Returns false on any allocation or write failure.
bool EntryImpl::CreateEntry(Addr node_address,
                            const std::string& key,
                            uint32_t hash) {
  EntryStore* entry_store = entry_.Data();
  RankingsNode* node = node_.Data();
  // Start from a fully zeroed state for both structures.
  *node = RankingsNode();
  std::ranges::fill(base::as_writable_bytes(entry_.AllData()), 0);

  if (!node_.LazyInit(backend_->File(node_address), node_address))
    return false;

  // Cross-link entry block and rankings node.
  entry_store->rankings_node = node_address.value();
  node->contents = entry_.address().value();

  entry_store->hash = hash;
  entry_store->creation_time = Time::Now().ToInternalValue();
  entry_store->key_len = static_cast<int32_t>(key.size());
  if (entry_store->key_len > kMaxInternalKeyLength) {
    // Long key: allocate external storage (+1 for the NUL terminator).
    Addr address(0);
    if (!CreateBlock(entry_store->key_len + 1, &address))
      return false;

    entry_store->long_key = address.value();
    File* key_file = GetBackingFile(address, kKeyFileIndex);
    key_ = key;

    size_t offset = 0;
    if (address.is_block_file())
      offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

    // Write the key (including its NUL) at the computed offset; on failure
    // release the just-allocated storage.
    if (!key_file ||
        !key_file->Write(
            base::byte_span_with_nul_from_cstring_view(base::cstring_view(key)),
            offset)) {
      DeleteData(address, kKeyFileIndex);
      return false;
    }

    if (address.is_separate_file())
      key_file->SetLength(key.size() + 1);
  } else {
    // Short key: store it (NUL-terminated) inline in the entry block.
    auto internal_key = InternalKeySpan();
    internal_key.copy_prefix_from(key);
    internal_key.at(key.size()) = '\0';
  }
  backend_->ModifyStorageSize(0, static_cast<int32_t>(key.size()));
  // Mark the node dirty until the entry is properly closed.
  node->dirty = backend_->GetCurrentEntryId();
  return true;
}
| 485 | |
avi | d0181f3 | 2015-12-10 19:41:47 | [diff] [blame] | 486 | bool EntryImpl::IsSameEntry(const std::string& key, uint32_t hash) { |
[email protected] | 2b1d4f2 | 2008-08-19 23:03:40 | [diff] [blame] | 487 | if (entry_.Data()->hash != hash || |
| 488 | static_cast<size_t>(entry_.Data()->key_len) != key.size()) |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 489 | return false; |
| 490 | |
[email protected] | f76184a | 2012-09-19 05:28:58 | [diff] [blame] | 491 | return (key.compare(GetKey()) == 0); |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 492 | } |
| 493 | |
| 494 | void EntryImpl::InternalDoom() { |
mikecirone | 8b85c43 | 2016-09-08 19:11:00 | [diff] [blame] | 495 | net_log_.AddEvent(net::NetLogEventType::ENTRY_DOOM); |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 496 | DCHECK(node_.HasData()); |
| 497 | if (!node_.Data()->dirty) { |
| 498 | node_.Data()->dirty = backend_->GetCurrentEntryId(); |
| 499 | node_.Store(); |
| 500 | } |
| 501 | doomed_ = true; |
| 502 | } |
| 503 | |
// Releases the storage used by this entry. With `everything` == false only
// the data streams are deleted; with true the key, the entry block and
// (usually) the rankings node are erased as well.
void EntryImpl::DeleteEntryData(bool everything) {
  // Full deletion is only valid for doomed entries.
  DCHECK(doomed_ || !everything);

  if (GetEntryFlags() & PARENT_ENTRY) {
    // We have some child entries that must go away.
    SparseControl::DeleteChildren(this);
  }

  for (int index = 0; index < kNumStreams; index++) {
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized()) {
      // Credit the committed portion of the stream back to the backend, then
      // clear and persist the address/size before deleting the data, so a
      // crash cannot leave the entry pointing at freed storage.
      backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                      unreported_size_[index], 0);
      entry_.Data()->data_addr[index] = 0;
      entry_.Data()->data_size[index] = 0;
      entry_.Store();
      DeleteData(address, index);
    }
  }

  if (!everything)
    return;

  // Remove all traces of this entry.
  backend_->RemoveEntry(this);

  // Note that at this point node_ and entry_ are just two blocks of data, and
  // even if they reference each other, nobody should be referencing them.

  // An external (long) key has its own storage; release it too.
  Addr address(entry_.Data()->long_key);
  DeleteData(address, kKeyFileIndex);
  backend_->ModifyStorageSize(entry_.Data()->key_len, 0);

  backend_->DeleteBlock(entry_.address(), true);
  entry_.Discard();

  // The rankings node may have to be left in place if it could still be
  // linked into a list (see LeaveRankingsBehind()).
  if (!LeaveRankingsBehind()) {
    backend_->DeleteBlock(node_.address(), true);
    node_.Discard();
  }
}
| 545 | |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 546 | CacheAddr EntryImpl::GetNextAddress() { |
| 547 | return entry_.Data()->next; |
| 548 | } |
| 549 | |
| 550 | void EntryImpl::SetNextAddress(Addr address) { |
[email protected] | ceb61da3 | 2011-01-25 23:52:02 | [diff] [blame] | 551 | DCHECK_NE(address.value(), entry_.address().value()); |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 552 | entry_.Data()->next = address.value(); |
| 553 | bool success = entry_.Store(); |
| 554 | DCHECK(success); |
| 555 | } |
| 556 | |
| 557 | bool EntryImpl::LoadNodeAddress() { |
| 558 | Addr address(entry_.Data()->rankings_node); |
| 559 | if (!node_.LazyInit(backend_->File(address), address)) |
| 560 | return false; |
| 561 | return node_.Load(); |
| 562 | } |
| 563 | |
[email protected] | c4c32fd | 2009-07-07 19:47:08 | [diff] [blame] | 564 | bool EntryImpl::Update() { |
| 565 | DCHECK(node_.HasData()); |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 566 | |
[email protected] | 67b09ec | 2010-08-27 17:49:42 | [diff] [blame] | 567 | if (read_only_) |
| 568 | return true; |
| 569 | |
[email protected] | c4c32fd | 2009-07-07 19:47:08 | [diff] [blame] | 570 | RankingsNode* rankings = node_.Data(); |
[email protected] | 66f16b1 | 2009-07-30 18:51:41 | [diff] [blame] | 571 | if (!rankings->dirty) { |
[email protected] | c4c32fd | 2009-07-07 19:47:08 | [diff] [blame] | 572 | rankings->dirty = backend_->GetCurrentEntryId(); |
[email protected] | c4c32fd | 2009-07-07 19:47:08 | [diff] [blame] | 573 | if (!node_.Store()) |
| 574 | return false; |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 575 | } |
[email protected] | c4c32fd | 2009-07-07 19:47:08 | [diff] [blame] | 576 | return true; |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 577 | } |
| 578 | |
avi | d0181f3 | 2015-12-10 19:41:47 | [diff] [blame] | 579 | void EntryImpl::SetDirtyFlag(int32_t current_id) { |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 580 | DCHECK(node_.HasData()); |
[email protected] | 5f274d2 | 2011-02-23 00:55:38 | [diff] [blame] | 581 | if (node_.Data()->dirty && current_id != node_.Data()->dirty) |
| 582 | dirty_ = true; |
[email protected] | e47737e2 | 2011-09-29 18:59:03 | [diff] [blame] | 583 | |
| 584 | if (!current_id) |
| 585 | dirty_ = true; |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 586 | } |
| 587 | |
avi | d0181f3 | 2015-12-10 19:41:47 | [diff] [blame] | 588 | void EntryImpl::SetPointerForInvalidEntry(int32_t new_id) { |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 589 | node_.Data()->dirty = new_id; |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 590 | node_.Store(); |
| 591 | } |
| 592 | |
[email protected] | e47737e2 | 2011-09-29 18:59:03 | [diff] [blame] | 593 | bool EntryImpl::LeaveRankingsBehind() { |
| 594 | return !node_.Data()->contents; |
| 595 | } |
| 596 | |
// This only includes checks that relate to the first block of the entry (the
// first 256 bytes), and values that should be set from the entry creation.
// Basically, even if there is something wrong with this entry, we want to see
// if it is possible to load the rankings node and delete them together.
bool EntryImpl::SanityCheck() {
  // The block carries a self-hash; a mismatch means raw corruption.
  if (!entry_.VerifyHash())
    return false;

  EntryStore* stored = entry_.Data();
  // Every entry must reference a rankings node and have a non-empty key.
  if (!stored->rankings_node || stored->key_len <= 0)
    return false;

  if (stored->reuse_count < 0 || stored->refetch_count < 0)
    return false;

  Addr rankings_addr(stored->rankings_node);
  if (!rankings_addr.SanityCheckForRankings())
    return false;

  // `next` links entries on the same hash bucket; when set it must be a
  // plausible entry address and must not point back at this entry.
  Addr next_addr(stored->next);
  if (next_addr.is_initialized() && !next_addr.SanityCheckForEntry()) {
    STRESS_NOTREACHED();
    return false;
  }
  STRESS_DCHECK(next_addr.value() != entry_.address().value());

  if (stored->state > ENTRY_DOOMED || stored->state < ENTRY_NORMAL)
    return false;

  // Keys longer than kMaxInternalKeyLength are stored externally, so a long
  // key must have an address and an internal key must not.
  Addr key_addr(stored->long_key);
  if ((stored->key_len <= kMaxInternalKeyLength && key_addr.is_initialized()) ||
      (stored->key_len > kMaxInternalKeyLength && !key_addr.is_initialized()))
    return false;

  if (!key_addr.SanityCheck())
    return false;

  // External keys below kMaxBlockSize live in block files; larger ones in
  // separate files. Any other combination is corrupt.
  if (key_addr.is_initialized() &&
      ((stored->key_len < kMaxBlockSize && key_addr.is_separate_file()) ||
       (stored->key_len >= kMaxBlockSize && key_addr.is_block_file())))
    return false;

  // The entry address must span exactly the number of blocks this key needs.
  int num_blocks = NumBlocksForEntry(stored->key_len);
  if (entry_.address().num_blocks() != num_blocks)
    return false;

  return true;
}
| 645 | |
// Deeper validation than SanityCheck(): verifies the key's terminator and
// hash, and the size/address consistency of every data stream.
bool EntryImpl::DataSanityCheck() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  // The key must be NULL terminated. Note the only caller of this is
  // BackendImpl::NewEntry, which checks EntryImpl::SanityCheck() first. That
  // ensures (among other things) that `key_addr.is_initialized()` reflects
  // whether the key is external or inside `stored->key` accurately; and in
  // case of internal key 0 <= key_len <= kMaxInternalKeyLength.
  if (!key_addr.is_initialized() &&
      InternalKeySpan().at(static_cast<size_t>(stored->key_len)) != '\0') {
    return false;
  }

  // The stored hash must match a recomputed hash of the key.
  if (stored->hash != base::PersistentHash(GetKey()))
    return false;

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_size < 0)
      return false;
    // An initialized address with no data is inconsistent.
    if (!data_size && data_addr.is_initialized())
      return false;
    if (!data_addr.SanityCheck())
      return false;
    if (!data_size)
      continue;
    // Small streams live in block files, large ones in separate files; the
    // opposite pairing indicates corruption.
    if (data_size <= kMaxBlockSize && data_addr.is_separate_file())
      return false;
    if (data_size > kMaxBlockSize && data_addr.is_block_file())
      return false;
  }
  return true;
}
| 681 | |
// Patches up an entry that passed SanityCheck() but failed DataSanityCheck()
// just enough for it to be deleted safely: restores the key terminator and
// clears stream addresses that cannot be trusted.
void EntryImpl::FixForDelete() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  // Note: this passed `SanityCheck()` which is sufficient for `stored->key` to
  // be the right size for `key_len` if `key_addr` is not initialized, and for
  // `key_len` to be in right range. It failed `DataSanityCheck()`, however,
  // so the null termination may be missing.
  if (!key_addr.is_initialized()) {
    InternalKeySpan().at(static_cast<size_t>(stored->key_len)) = '\0';
  }

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_addr.is_initialized()) {
      // Same size/location consistency rules as DataSanityCheck().
      if ((data_size <= kMaxBlockSize && data_addr.is_separate_file()) ||
          (data_size > kMaxBlockSize && data_addr.is_block_file()) ||
          !data_addr.SanityCheck()) {
        STRESS_NOTREACHED();
        // The address is weird so don't attempt to delete it.
        stored->data_addr[i] = 0;
        // In general, trust the stored size as it should be in sync with the
        // total size tracked by the backend.
      }
    }
    // Negative sizes are plainly invalid; clamp to empty.
    if (data_size < 0)
      stored->data_size[i] = 0;
  }
  entry_.Store();
}
| 713 | |
void EntryImpl::IncrementIoCount() {
  // Forwarded to the backend, which keeps the in-flight IO count.
  backend_->IncrementIoCount();
}
| 717 | |
| 718 | void EntryImpl::DecrementIoCount() { |
[email protected] | 11fbca0b | 2013-06-02 23:37:21 | [diff] [blame] | 719 | if (backend_.get()) |
[email protected] | 4739f709 | 2012-03-20 23:15:10 | [diff] [blame] | 720 | backend_->DecrementIoCount(); |
| 721 | } |
| 722 | |
void EntryImpl::OnEntryCreated(BackendImpl* backend) {
  // Just grab a reference to the background queue; that is all this entry
  // needs from the backend at creation time.
  background_queue_ = backend->GetBackgroundQueue();
}
| 727 | |
Maks Orlovich | 31db0fc0 | 2025-03-25 13:17:14 | [diff] [blame] | 728 | void EntryImpl::SetTimes(base::Time last_used) { |
| 729 | auto timestamp = last_used.ToInternalValue(); |
| 730 | auto* node_data = node_.Data(); |
| 731 | node_data->last_used = timestamp; |
| 732 | node_data->no_longer_used_last_modified = timestamp; |
[email protected] | 07c3f84 | 2008-12-02 19:14:29 | [diff] [blame] | 733 | node_.set_modified(); |
| 734 | } |
| 735 | |
[email protected] | f6f1bebc | 2011-01-07 03:04:54 | [diff] [blame] | 736 | void EntryImpl::BeginLogging(net::NetLog* net_log, bool created) { |
| 737 | DCHECK(!net_log_.net_log()); |
tfarina | 42834111 | 2016-09-22 13:38:20 | [diff] [blame] | 738 | net_log_ = net::NetLogWithSource::Make( |
| 739 | net_log, net::NetLogSourceType::DISK_CACHE_ENTRY); |
Eric Roman | 06bd974 | 2019-07-13 15:19:13 | [diff] [blame] | 740 | net_log_.BeginEvent(net::NetLogEventType::DISK_CACHE_ENTRY_IMPL, [&] { |
| 741 | return CreateNetLogParametersEntryCreationParams(this, created); |
| 742 | }); |
[email protected] | f6f1bebc | 2011-01-07 03:04:54 | [diff] [blame] | 743 | } |
| 744 | |
const net::NetLogWithSource& EntryImpl::net_log() const {
  // Accessor for the log stream set up by BeginLogging().
  return net_log_;
}
| 748 | |
[email protected] | b9b4ce0b | 2011-07-19 18:10:51 | [diff] [blame] | 749 | // static |
| 750 | int EntryImpl::NumBlocksForEntry(int key_size) { |
| 751 | // The longest key that can be stored using one block. |
| 752 | int key1_len = |
| 753 | static_cast<int>(sizeof(EntryStore) - offsetof(EntryStore, key)); |
| 754 | |
| 755 | if (key_size < key1_len || key_size > kMaxInternalKeyLength) |
| 756 | return 1; |
| 757 | |
| 758 | return ((key_size - key1_len) / 256 + 2); |
| 759 | } |
| 760 | |
| 761 | // ------------------------------------------------------------------------ |
| 762 | |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 763 | void EntryImpl::Doom() { |
[email protected] | 11fbca0b | 2013-06-02 23:37:21 | [diff] [blame] | 764 | if (background_queue_.get()) |
[email protected] | 4739f709 | 2012-03-20 23:15:10 | [diff] [blame] | 765 | background_queue_->DoomEntryImpl(this); |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 766 | } |
| 767 | |
| 768 | void EntryImpl::Close() { |
[email protected] | 11fbca0b | 2013-06-02 23:37:21 | [diff] [blame] | 769 | if (background_queue_.get()) |
[email protected] | 4739f709 | 2012-03-20 23:15:10 | [diff] [blame] | 770 | background_queue_->CloseEntryImpl(this); |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 771 | } |
| 772 | |
// Returns the entry's key. Short keys are read straight from the entry
// block; long (external) keys are read from disk once and cached in `key_`.
// Returns an empty string when the external key cannot be read.
std::string EntryImpl::GetKey() const {
  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  int key_len = entry->Data()->key_len;
  // Internal keys live inline inside the entry block.
  if (key_len <= kMaxInternalKeyLength)
    return std::string(base::as_string_view(
        InternalKeySpan().first(static_cast<size_t>(key_len))));

  // We keep a copy of the key so that we can always return it, even if the
  // backend is disabled.
  if (!key_.empty())
    return key_;

  Addr address(entry->Data()->long_key);
  DCHECK(address.is_initialized());
  size_t offset = 0;
  // Keys in a block file start past the file header, at the key's block.
  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  static_assert(kNumStreams == kKeyFileIndex, "invalid key index");
  File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
                                                                kKeyFileIndex);
  if (!key_file)
    return std::string();

  // We store a trailing \0 on disk.
  if (!offset && key_file->GetLength() != static_cast<size_t>(key_len + 1)) {
    return std::string();
  }

  // Do not attempt read up to the expected on-disk '\0' --- which would be
  // |key_len + 1| bytes total --- as if due to a corrupt file it isn't |key_|
  // would get its internal nul messed up.
  key_.resize(key_len);
  if (!key_file->Read(base::as_writable_byte_span(key_), offset)) {
    key_.clear();
  }
  DCHECK_LE(strlen(key_.data()), static_cast<size_t>(key_len));
  return key_;
}
| 812 | |
| 813 | Time EntryImpl::GetLastUsed() const { |
| 814 | CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_); |
| 815 | return Time::FromInternalValue(node->Data()->last_used); |
| 816 | } |
| 817 | |
avi | d0181f3 | 2015-12-10 19:41:47 | [diff] [blame] | 818 | int32_t EntryImpl::GetDataSize(int index) const { |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 819 | if (index < 0 || index >= kNumStreams) |
| 820 | return 0; |
| 821 | |
| 822 | CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); |
| 823 | return entry->Data()->data_size[index]; |
| 824 | } |
| 825 | |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 826 | int EntryImpl::ReadData(int index, |
| 827 | int offset, |
| 828 | IOBuffer* buf, |
| 829 | int buf_len, |
| 830 | CompletionOnceCallback callback) { |
[email protected] | 2a65aceb8 | 2011-12-19 20:59:27 | [diff] [blame] | 831 | if (callback.is_null()) |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 832 | return ReadDataImpl(index, offset, buf, buf_len, std::move(callback)); |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 833 | |
| 834 | DCHECK(node_.Data()->dirty || read_only_); |
| 835 | if (index < 0 || index >= kNumStreams) |
| 836 | return net::ERR_INVALID_ARGUMENT; |
| 837 | |
| 838 | int entry_size = entry_.Data()->data_size[index]; |
Eriko Kurimoto | 2790239 | 2025-06-23 13:16:17 | [diff] [blame] | 839 | if (offset >= entry_size || !buf_len) { |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 840 | return 0; |
Eriko Kurimoto | 2790239 | 2025-06-23 13:16:17 | [diff] [blame] | 841 | } |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 842 | |
Eriko Kurimoto | 2790239 | 2025-06-23 13:16:17 | [diff] [blame] | 843 | if (offset < 0 || buf_len < 0) { |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 844 | return net::ERR_INVALID_ARGUMENT; |
Eriko Kurimoto | 2790239 | 2025-06-23 13:16:17 | [diff] [blame] | 845 | } |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 846 | |
[email protected] | 11fbca0b | 2013-06-02 23:37:21 | [diff] [blame] | 847 | if (!background_queue_.get()) |
[email protected] | 4739f709 | 2012-03-20 23:15:10 | [diff] [blame] | 848 | return net::ERR_UNEXPECTED; |
| 849 | |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 850 | background_queue_->ReadData(this, index, offset, buf, buf_len, |
| 851 | std::move(callback)); |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 852 | return net::ERR_IO_PENDING; |
| 853 | } |
| 854 | |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 855 | int EntryImpl::WriteData(int index, |
| 856 | int offset, |
| 857 | IOBuffer* buf, |
| 858 | int buf_len, |
| 859 | CompletionOnceCallback callback, |
| 860 | bool truncate) { |
John Abd-El-Malek | bb9a602 | 2022-09-28 23:53:11 | [diff] [blame] | 861 | if (callback.is_null()) { |
John Abd-El-Malek | 0dcbbcfd | 2023-03-21 00:27:50 | [diff] [blame] | 862 | return WriteDataImpl(index, offset, buf, buf_len, std::move(callback), |
| 863 | truncate); |
John Abd-El-Malek | bb9a602 | 2022-09-28 23:53:11 | [diff] [blame] | 864 | } |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 865 | |
| 866 | DCHECK(node_.Data()->dirty || read_only_); |
| 867 | if (index < 0 || index >= kNumStreams) |
| 868 | return net::ERR_INVALID_ARGUMENT; |
| 869 | |
| 870 | if (offset < 0 || buf_len < 0) |
| 871 | return net::ERR_INVALID_ARGUMENT; |
| 872 | |
Maks Orlovich | 5e7ded6 | 2025-06-30 16:21:50 | [diff] [blame] | 873 | if (!buf && buf_len != 0) { |
| 874 | return net::ERR_INVALID_ARGUMENT; |
| 875 | } |
| 876 | |
[email protected] | 11fbca0b | 2013-06-02 23:37:21 | [diff] [blame] | 877 | if (!background_queue_.get()) |
[email protected] | 4739f709 | 2012-03-20 23:15:10 | [diff] [blame] | 878 | return net::ERR_UNEXPECTED; |
| 879 | |
| 880 | background_queue_->WriteData(this, index, offset, buf, buf_len, truncate, |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 881 | std::move(callback)); |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 882 | return net::ERR_IO_PENDING; |
| 883 | } |
| 884 | |
avi | d0181f3 | 2015-12-10 19:41:47 | [diff] [blame] | 885 | int EntryImpl::ReadSparseData(int64_t offset, |
| 886 | IOBuffer* buf, |
| 887 | int buf_len, |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 888 | CompletionOnceCallback callback) { |
[email protected] | 2a65aceb8 | 2011-12-19 20:59:27 | [diff] [blame] | 889 | if (callback.is_null()) |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 890 | return ReadSparseDataImpl(offset, buf, buf_len, std::move(callback)); |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 891 | |
[email protected] | 11fbca0b | 2013-06-02 23:37:21 | [diff] [blame] | 892 | if (!background_queue_.get()) |
[email protected] | 4739f709 | 2012-03-20 23:15:10 | [diff] [blame] | 893 | return net::ERR_UNEXPECTED; |
| 894 | |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 895 | background_queue_->ReadSparseData(this, offset, buf, buf_len, |
| 896 | std::move(callback)); |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 897 | return net::ERR_IO_PENDING; |
| 898 | } |
| 899 | |
avi | d0181f3 | 2015-12-10 19:41:47 | [diff] [blame] | 900 | int EntryImpl::WriteSparseData(int64_t offset, |
| 901 | IOBuffer* buf, |
| 902 | int buf_len, |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 903 | CompletionOnceCallback callback) { |
[email protected] | 2a65aceb8 | 2011-12-19 20:59:27 | [diff] [blame] | 904 | if (callback.is_null()) |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 905 | return WriteSparseDataImpl(offset, buf, buf_len, std::move(callback)); |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 906 | |
[email protected] | 11fbca0b | 2013-06-02 23:37:21 | [diff] [blame] | 907 | if (!background_queue_.get()) |
[email protected] | 4739f709 | 2012-03-20 23:15:10 | [diff] [blame] | 908 | return net::ERR_UNEXPECTED; |
| 909 | |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 910 | background_queue_->WriteSparseData(this, offset, buf, buf_len, |
| 911 | std::move(callback)); |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 912 | return net::ERR_IO_PENDING; |
| 913 | } |
| 914 | |
Maks Orlovich | 04cd1ad | 2021-07-02 17:32:24 | [diff] [blame] | 915 | RangeResult EntryImpl::GetAvailableRange(int64_t offset, |
| 916 | int len, |
| 917 | RangeResultCallback callback) { |
[email protected] | 11fbca0b | 2013-06-02 23:37:21 | [diff] [blame] | 918 | if (!background_queue_.get()) |
Maks Orlovich | 04cd1ad | 2021-07-02 17:32:24 | [diff] [blame] | 919 | return RangeResult(net::ERR_UNEXPECTED); |
[email protected] | 4739f709 | 2012-03-20 23:15:10 | [diff] [blame] | 920 | |
Maks Orlovich | 04cd1ad | 2021-07-02 17:32:24 | [diff] [blame] | 921 | background_queue_->GetAvailableRange(this, offset, len, std::move(callback)); |
| 922 | return RangeResult(net::ERR_IO_PENDING); |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 923 | } |
| 924 | |
| 925 | bool EntryImpl::CouldBeSparse() const { |
| 926 | if (sparse_.get()) |
| 927 | return true; |
| 928 | |
Lei Zhang | e00db75 | 2021-04-17 00:48:46 | [diff] [blame] | 929 | auto sparse = std::make_unique<SparseControl>(const_cast<EntryImpl*>(this)); |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 930 | return sparse->CouldBeSparse(); |
| 931 | } |
| 932 | |
| 933 | void EntryImpl::CancelSparseIO() { |
[email protected] | 11fbca0b | 2013-06-02 23:37:21 | [diff] [blame] | 934 | if (background_queue_.get()) |
[email protected] | 4739f709 | 2012-03-20 23:15:10 | [diff] [blame] | 935 | background_queue_->CancelSparseIO(this); |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 936 | } |
| 937 | |
Victor Costan | 45c36ac | 2018-10-08 07:31:52 | [diff] [blame] | 938 | net::Error EntryImpl::ReadyForSparseIO(CompletionOnceCallback callback) { |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 939 | if (!sparse_.get()) |
| 940 | return net::OK; |
| 941 | |
[email protected] | 11fbca0b | 2013-06-02 23:37:21 | [diff] [blame] | 942 | if (!background_queue_.get()) |
[email protected] | 4739f709 | 2012-03-20 23:15:10 | [diff] [blame] | 943 | return net::ERR_UNEXPECTED; |
| 944 | |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 945 | background_queue_->ReadyForSparseIO(this, std::move(callback)); |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 946 | return net::ERR_IO_PENDING; |
| 947 | } |
| 948 | |
Jay Civelli | 78612bf | 2018-03-01 20:59:12 | [diff] [blame] | 949 | void EntryImpl::SetLastUsedTimeForTest(base::Time time) { |
Maks Orlovich | 31db0fc0 | 2025-03-25 13:17:14 | [diff] [blame] | 950 | SetTimes(time); |
Jay Civelli | 78612bf | 2018-03-01 20:59:12 | [diff] [blame] | 951 | } |
| 952 | |
// When an entry is deleted from the cache, we clean up all the data associated
// with it for two reasons: to simplify the reuse of the block (we know that any
// unused block is filled with zeros), and to simplify the handling of write /
// read partial information from an entry (don't have to worry about returning
// data related to a previous cache entry because the range was not fully
// written before).
EntryImpl::~EntryImpl() {
  // With the backend gone there is nowhere to flush to; drop the modified
  // bits so the block destructors do not attempt any writes.
  if (!backend_.get()) {
    entry_.clear_modified();
    node_.clear_modified();
    return;
  }

  // Save the sparse info to disk. This will generate IO for this entry and
  // maybe for a child entry, so it is important to do it before deleting this
  // entry.
  sparse_.reset();

  // Remove this entry from the list of open entries.
  backend_->OnEntryDestroyBegin(entry_.address());

  if (doomed_) {
    DeleteEntryData(true);
  } else {
#if defined(NET_BUILD_STRESS_CACHE)
    SanityCheck();
#endif
    net_log_.AddEvent(net::NetLogEventType::ENTRY_CLOSE);
    bool ret = true;
    for (int index = 0; index < kNumStreams; index++) {
      if (user_buffers_[index].get()) {
        // Flush any buffered user data for this stream back to disk.
        ret = Flush(index, 0);
        if (!ret)
          LOG(ERROR) << "Failed to save user data";
      }
      if (unreported_size_[index]) {
        // Reconcile the size delta that was never reported to the backend.
        backend_->ModifyStorageSize(
            entry_.Data()->data_size[index] - unreported_size_[index],
            entry_.Data()->data_size[index]);
      }
    }

    if (!ret) {
      // There was a failure writing the actual data. Mark the entry as dirty.
      int current_id = backend_->GetCurrentEntryId();
      node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
      node_.Store();
    } else if (node_.HasData() && !dirty_ && node_.Data()->dirty) {
      // Everything was saved cleanly; clear the dirty mark.
      node_.Data()->dirty = 0;
      node_.Store();
    }
  }

  net_log_.EndEvent(net::NetLogEventType::DISK_CACHE_ENTRY_IMPL);
  backend_->OnEntryDestroyEnd();
}
| 1009 | |
| 1010 | // ------------------------------------------------------------------------ |
| 1011 | |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 1012 | int EntryImpl::InternalReadData(int index, |
| 1013 | int offset, |
| 1014 | IOBuffer* buf, |
| 1015 | int buf_len, |
| 1016 | CompletionOnceCallback callback) { |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1017 | DCHECK(node_.Data()->dirty || read_only_); |
| 1018 | DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len; |
| 1019 | if (index < 0 || index >= kNumStreams) |
| 1020 | return net::ERR_INVALID_ARGUMENT; |
| 1021 | |
| 1022 | int entry_size = entry_.Data()->data_size[index]; |
Eriko Kurimoto | 2790239 | 2025-06-23 13:16:17 | [diff] [blame] | 1023 | if (offset >= entry_size || !buf_len) { |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1024 | return 0; |
Eriko Kurimoto | 2790239 | 2025-06-23 13:16:17 | [diff] [blame] | 1025 | } |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1026 | |
Eriko Kurimoto | 2790239 | 2025-06-23 13:16:17 | [diff] [blame] | 1027 | if (offset < 0 || buf_len < 0) { |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1028 | return net::ERR_INVALID_ARGUMENT; |
Eriko Kurimoto | 2790239 | 2025-06-23 13:16:17 | [diff] [blame] | 1029 | } |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1030 | |
[email protected] | 11fbca0b | 2013-06-02 23:37:21 | [diff] [blame] | 1031 | if (!backend_.get()) |
[email protected] | 4739f709 | 2012-03-20 23:15:10 | [diff] [blame] | 1032 | return net::ERR_UNEXPECTED; |
| 1033 | |
Matthew Denton | a21ca7b | 2019-03-13 19:02:32 | [diff] [blame] | 1034 | int end_offset; |
| 1035 | if (!base::CheckAdd(offset, buf_len).AssignIfValid(&end_offset) || |
| 1036 | end_offset > entry_size) |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1037 | buf_len = entry_size - offset; |
| 1038 | |
| 1039 | UpdateRank(false); |
| 1040 | |
| 1041 | backend_->OnEvent(Stats::READ_DATA); |
| 1042 | backend_->OnRead(buf_len); |
| 1043 | |
| 1044 | Addr address(entry_.Data()->data_addr[index]); |
| 1045 | int eof = address.is_initialized() ? entry_size : 0; |
| 1046 | if (user_buffers_[index].get() && |
| 1047 | user_buffers_[index]->PreRead(eof, offset, &buf_len)) { |
| 1048 | // Complete the operation locally. |
| 1049 | buf_len = user_buffers_[index]->Read(offset, buf, buf_len); |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1050 | return buf_len; |
| 1051 | } |
| 1052 | |
| 1053 | address.set_value(entry_.Data()->data_addr[index]); |
[email protected] | 52c7571 | 2011-12-16 01:19:19 | [diff] [blame] | 1054 | if (!address.is_initialized()) { |
| 1055 | DoomImpl(); |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1056 | return net::ERR_FAILED; |
[email protected] | 52c7571 | 2011-12-16 01:19:19 | [diff] [blame] | 1057 | } |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1058 | |
| 1059 | File* file = GetBackingFile(address, index); |
[email protected] | 52c7571 | 2011-12-16 01:19:19 | [diff] [blame] | 1060 | if (!file) { |
| 1061 | DoomImpl(); |
[email protected] | eab2e041 | 2013-04-26 07:09:50 | [diff] [blame] | 1062 | LOG(ERROR) << "No file for " << std::hex << address.value(); |
| 1063 | return net::ERR_FILE_NOT_FOUND; |
[email protected] | 52c7571 | 2011-12-16 01:19:19 | [diff] [blame] | 1064 | } |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1065 | |
| 1066 | size_t file_offset = offset; |
| 1067 | if (address.is_block_file()) { |
| 1068 | DCHECK_LE(offset + buf_len, kMaxBlockSize); |
| 1069 | file_offset += address.start_block() * address.BlockSize() + |
| 1070 | kBlockHeaderSize; |
| 1071 | } |
| 1072 | |
Raul Tambre | 94493c65 | 2019-03-11 17:18:35 | [diff] [blame] | 1073 | SyncCallback* io_callback = nullptr; |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 1074 | bool null_callback = callback.is_null(); |
| 1075 | if (!null_callback) { |
| 1076 | io_callback = |
| 1077 | new SyncCallback(base::WrapRefCounted(this), buf, std::move(callback), |
| 1078 | net::NetLogEventType::ENTRY_READ_DATA); |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1079 | } |
| 1080 | |
| 1081 | bool completed; |
Maks Orlovich | a00fe58 | 2025-06-12 22:17:20 | [diff] [blame] | 1082 | if (!file->Read(buf->first(base::checked_cast<size_t>(buf_len)), file_offset, |
| 1083 | io_callback, &completed)) { |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1084 | if (io_callback) |
| 1085 | io_callback->Discard(); |
[email protected] | 52c7571 | 2011-12-16 01:19:19 | [diff] [blame] | 1086 | DoomImpl(); |
[email protected] | eab2e041 | 2013-04-26 07:09:50 | [diff] [blame] | 1087 | return net::ERR_CACHE_READ_FAILURE; |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1088 | } |
| 1089 | |
| 1090 | if (io_callback && completed) |
| 1091 | io_callback->Discard(); |
| 1092 | |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 1093 | return (completed || null_callback) ? buf_len : net::ERR_IO_PENDING; |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1094 | } |
| 1095 | |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 1096 | int EntryImpl::InternalWriteData(int index, |
| 1097 | int offset, |
| 1098 | IOBuffer* buf, |
| 1099 | int buf_len, |
| 1100 | CompletionOnceCallback callback, |
John Abd-El-Malek | 0dcbbcfd | 2023-03-21 00:27:50 | [diff] [blame] | 1101 | bool truncate) { |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1102 | DCHECK(node_.Data()->dirty || read_only_); |
| 1103 | DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len; |
| 1104 | if (index < 0 || index >= kNumStreams) |
| 1105 | return net::ERR_INVALID_ARGUMENT; |
| 1106 | |
| 1107 | if (offset < 0 || buf_len < 0) |
| 1108 | return net::ERR_INVALID_ARGUMENT; |
| 1109 | |
[email protected] | 11fbca0b | 2013-06-02 23:37:21 | [diff] [blame] | 1110 | if (!backend_.get()) |
[email protected] | 4739f709 | 2012-03-20 23:15:10 | [diff] [blame] | 1111 | return net::ERR_UNEXPECTED; |
| 1112 | |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1113 | int max_file_size = backend_->MaxFileSize(); |
| 1114 | |
Matthew Denton | a21ca7b | 2019-03-13 19:02:32 | [diff] [blame] | 1115 | int end_offset; |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1116 | if (offset > max_file_size || buf_len > max_file_size || |
Matthew Denton | a21ca7b | 2019-03-13 19:02:32 | [diff] [blame] | 1117 | !base::CheckAdd(offset, buf_len).AssignIfValid(&end_offset) || |
| 1118 | end_offset > max_file_size) { |
| 1119 | int size = base::CheckAdd(offset, buf_len) |
| 1120 | .ValueOrDefault(std::numeric_limits<int32_t>::max()); |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1121 | backend_->TooMuchStorageRequested(size); |
| 1122 | return net::ERR_FAILED; |
| 1123 | } |
| 1124 | |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1125 | // Read the size at this point (it may change inside prepare). |
| 1126 | int entry_size = entry_.Data()->data_size[index]; |
| 1127 | bool extending = entry_size < offset + buf_len; |
| 1128 | truncate = truncate && entry_size > offset + buf_len; |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1129 | if (!PrepareTarget(index, offset, buf_len, truncate)) |
| 1130 | return net::ERR_FAILED; |
| 1131 | |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1132 | if (extending || truncate) |
| 1133 | UpdateSize(index, entry_size, offset + buf_len); |
| 1134 | |
| 1135 | UpdateRank(true); |
| 1136 | |
| 1137 | backend_->OnEvent(Stats::WRITE_DATA); |
| 1138 | backend_->OnWrite(buf_len); |
| 1139 | |
| 1140 | if (user_buffers_[index].get()) { |
| 1141 | // Complete the operation locally. |
| 1142 | user_buffers_[index]->Write(offset, buf, buf_len); |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1143 | return buf_len; |
| 1144 | } |
| 1145 | |
| 1146 | Addr address(entry_.Data()->data_addr[index]); |
| 1147 | if (offset + buf_len == 0) { |
| 1148 | if (truncate) { |
| 1149 | DCHECK(!address.is_initialized()); |
| 1150 | } |
| 1151 | return 0; |
| 1152 | } |
| 1153 | |
| 1154 | File* file = GetBackingFile(address, index); |
| 1155 | if (!file) |
[email protected] | eab2e041 | 2013-04-26 07:09:50 | [diff] [blame] | 1156 | return net::ERR_FILE_NOT_FOUND; |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1157 | |
| 1158 | size_t file_offset = offset; |
| 1159 | if (address.is_block_file()) { |
| 1160 | DCHECK_LE(offset + buf_len, kMaxBlockSize); |
| 1161 | file_offset += address.start_block() * address.BlockSize() + |
| 1162 | kBlockHeaderSize; |
| 1163 | } else if (truncate || (extending && !buf_len)) { |
| 1164 | if (!file->SetLength(offset + buf_len)) |
| 1165 | return net::ERR_FAILED; |
| 1166 | } |
| 1167 | |
| 1168 | if (!buf_len) |
| 1169 | return 0; |
| 1170 | |
Raul Tambre | 94493c65 | 2019-03-11 17:18:35 | [diff] [blame] | 1171 | SyncCallback* io_callback = nullptr; |
Maks Orlovich | 4acf0b58 | 2018-06-07 11:39:44 | [diff] [blame] | 1172 | bool null_callback = callback.is_null(); |
| 1173 | if (!null_callback) { |
John Abd-El-Malek | 0dcbbcfd | 2023-03-21 00:27:50 | [diff] [blame] | 1174 | io_callback = new SyncCallback(this, buf, std::move(callback), |
mikecirone | 8b85c43 | 2016-09-08 19:11:00 | [diff] [blame] | 1175 | net::NetLogEventType::ENTRY_WRITE_DATA); |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1176 | } |
| 1177 | |
| 1178 | bool completed; |
Maks Orlovich | a00fe58 | 2025-06-12 22:17:20 | [diff] [blame] | 1179 | if (!file->Write(buf->first(base::checked_cast<size_t>(buf_len)), file_offset, |
| 1180 | io_callback, &completed)) { |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1181 | if (io_callback) |
| 1182 | io_callback->Discard(); |
[email protected] | eab2e041 | 2013-04-26 07:09:50 | [diff] [blame] | 1183 | return net::ERR_CACHE_WRITE_FAILURE; |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1184 | } |
| 1185 | |
| 1186 | if (io_callback && completed) |
| 1187 | io_callback->Discard(); |
| 1188 | |
John Abd-El-Malek | 0dcbbcfd | 2023-03-21 00:27:50 | [diff] [blame] | 1189 | return (completed || null_callback) ? buf_len : net::ERR_IO_PENDING; |
[email protected] | 4b3c95dd | 2011-01-07 23:02:11 | [diff] [blame] | 1190 | } |
| 1191 | |
[email protected] | 18995e2 | 2009-08-31 17:35:14 | [diff] [blame] | 1192 | // ------------------------------------------------------------------------ |
| 1193 | |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 1194 | bool EntryImpl::CreateDataBlock(int index, int size) { |
[email protected] | 5518549 | 2009-06-25 17:28:31 | [diff] [blame] | 1195 | DCHECK(index >= 0 && index < kNumStreams); |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 1196 | |
[email protected] | 9b2dda1 | 2009-03-18 23:22:33 | [diff] [blame] | 1197 | Addr address(entry_.Data()->data_addr[index]); |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 1198 | if (!CreateBlock(size, &address)) |
| 1199 | return false; |
| 1200 | |
| 1201 | entry_.Data()->data_addr[index] = address.value(); |
| 1202 | entry_.Store(); |
| 1203 | return true; |
| 1204 | } |
| 1205 | |
| 1206 | bool EntryImpl::CreateBlock(int size, Addr* address) { |
| 1207 | DCHECK(!address->is_initialized()); |
[email protected] | 11fbca0b | 2013-06-02 23:37:21 | [diff] [blame] | 1208 | if (!backend_.get()) |
[email protected] | 4739f709 | 2012-03-20 23:15:10 | [diff] [blame] | 1209 | return false; |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 1210 | |
| 1211 | FileType file_type = Addr::RequiredFileType(size); |
| 1212 | if (EXTERNAL == file_type) { |
| 1213 | if (size > backend_->MaxFileSize()) |
| 1214 | return false; |
| 1215 | if (!backend_->CreateExternalFile(address)) |
| 1216 | return false; |
| 1217 | } else { |
[email protected] | 8a09bc76 | 2013-05-24 00:47:23 | [diff] [blame] | 1218 | int num_blocks = Addr::RequiredBlocks(size, file_type); |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 1219 | |
| 1220 | if (!backend_->CreateBlock(file_type, num_blocks, address)) |
| 1221 | return false; |
| 1222 | } |
| 1223 | return true; |
| 1224 | } |
| 1225 | |
[email protected] | 65188eb | 2010-09-16 20:59:29 | [diff] [blame] | 1226 | // Note that this method may end up modifying a block file so upon return the |
| 1227 | // involved block will be free, and could be reused for something else. If there |
| 1228 | // is a crash after that point (and maybe before returning to the caller), the |
| 1229 | // entry will be left dirty... and at some point it will be discarded; it is |
| 1230 | // important that the entry doesn't keep a reference to this address, or we'll |
| 1231 | // end up deleting the contents of |address| once again. |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 1232 | void EntryImpl::DeleteData(Addr address, int index) { |
[email protected] | 11fbca0b | 2013-06-02 23:37:21 | [diff] [blame] | 1233 | DCHECK(backend_.get()); |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 1234 | if (!address.is_initialized()) |
| 1235 | return; |
| 1236 | if (address.is_separate_file()) { |
Yutaka Hirano | 238ce65 | 2022-04-19 05:20:40 | [diff] [blame] | 1237 | int failure = !base::DeleteFile(backend_->GetFileName(address)); |
[email protected] | 10f2e69 | 2010-09-29 21:00:35 | [diff] [blame] | 1238 | if (failure) { |
[email protected] | cfaa1f2 | 2009-10-12 17:14:59 | [diff] [blame] | 1239 | LOG(ERROR) << "Failed to delete " << |
| 1240 | backend_->GetFileName(address).value() << " from the cache."; |
[email protected] | 10f2e69 | 2010-09-29 21:00:35 | [diff] [blame] | 1241 | } |
[email protected] | 9049948 | 2013-06-01 00:39:50 | [diff] [blame] | 1242 | if (files_[index].get()) |
Raul Tambre | 94493c65 | 2019-03-11 17:18:35 | [diff] [blame] | 1243 | files_[index] = nullptr; // Releases the object. |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 1244 | } else { |
| 1245 | backend_->DeleteBlock(address, true); |
| 1246 | } |
| 1247 | } |
| 1248 | |
| 1249 | void EntryImpl::UpdateRank(bool modified) { |
[email protected] | 11fbca0b | 2013-06-02 23:37:21 | [diff] [blame] | 1250 | if (!backend_.get()) |
[email protected] | 4739f709 | 2012-03-20 23:15:10 | [diff] [blame] | 1251 | return; |
| 1252 | |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 1253 | if (!doomed_) { |
| 1254 | // Everything is handled by the backend. |
[email protected] | 67b09ec | 2010-08-27 17:49:42 | [diff] [blame] | 1255 | backend_->UpdateRank(this, modified); |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 1256 | return; |
| 1257 | } |
| 1258 | |
| 1259 | Time current = Time::Now(); |
Maks Orlovich | 31db0fc0 | 2025-03-25 13:17:14 | [diff] [blame] | 1260 | auto timestamp = current.ToInternalValue(); |
| 1261 | auto* node_data = node_.Data(); |
| 1262 | node_data->last_used = timestamp; |
| 1263 | node_data->no_longer_used_last_modified = timestamp; |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 1264 | } |
| 1265 | |
| 1266 | File* EntryImpl::GetBackingFile(Addr address, int index) { |
[email protected] | 11fbca0b | 2013-06-02 23:37:21 | [diff] [blame] | 1267 | if (!backend_.get()) |
Raul Tambre | 94493c65 | 2019-03-11 17:18:35 | [diff] [blame] | 1268 | return nullptr; |
[email protected] | 4739f709 | 2012-03-20 23:15:10 | [diff] [blame] | 1269 | |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 1270 | File* file; |
| 1271 | if (address.is_separate_file()) |
| 1272 | file = GetExternalFile(address, index); |
| 1273 | else |
| 1274 | file = backend_->File(address); |
| 1275 | return file; |
| 1276 | } |
| 1277 | |
| 1278 | File* EntryImpl::GetExternalFile(Addr address, int index) { |
[email protected] | 62cdf1e | 2008-12-04 23:03:33 | [diff] [blame] | 1279 | DCHECK(index >= 0 && index <= kKeyFileIndex); |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 1280 | if (!files_[index].get()) { |
| 1281 | // For a key file, use mixed mode IO. |
Tsuyoshi Horo | 2c0a504 | 2022-07-06 05:53:07 | [diff] [blame] | 1282 | auto file = base::MakeRefCounted<File>(kKeyFileIndex == index); |
[email protected] | f294da7 | 2009-10-12 21:39:37 | [diff] [blame] | 1283 | if (file->Init(backend_->GetFileName(address))) |
[email protected] | bdad42a4 | 2008-07-31 21:28:48 | [diff] [blame] | 1284 | files_[index].swap(file); |
| 1285 | } |
| 1286 | return files_[index].get(); |
| 1287 | } |
| 1288 | |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 1289 | // We keep a memory buffer for everything that ends up stored on a block file |
| 1290 | // (because we don't know yet the final data size), and for some of the data |
| 1291 | // that end up on external files. This function will initialize that memory |
| 1292 | // buffer and / or the files needed to store the data. |
| 1293 | // |
| 1294 | // In general, a buffer may overlap data already stored on disk, and in that |
| 1295 | // case, the contents of the buffer are the most accurate. It may also extend |
| 1296 | // the file, but we don't want to read from disk just to keep the buffer up to |
| 1297 | // date. This means that as soon as there is a chance to get confused about what |
| 1298 | // is the most recent version of some part of a file, we'll flush the buffer and |
| 1299 | // reuse it for the new data. Keep in mind that the normal use pattern is quite |
| 1300 | // simple (write sequentially from the beginning), so we optimize for handling |
| 1301 | // that case. |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 1302 | bool EntryImpl::PrepareTarget(int index, int offset, int buf_len, |
| 1303 | bool truncate) { |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 1304 | if (truncate) |
| 1305 | return HandleTruncation(index, offset, buf_len); |
| 1306 | |
[email protected] | 1bf71ed | 2010-10-01 16:31:10 | [diff] [blame] | 1307 | if (!offset && !buf_len) |
| 1308 | return true; |
| 1309 | |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 1310 | Addr address(entry_.Data()->data_addr[index]); |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 1311 | if (address.is_initialized()) { |
| 1312 | if (address.is_block_file() && !MoveToLocalBuffer(index)) |
| 1313 | return false; |
[email protected] | e7f2964 | 2009-03-02 22:53:18 | [diff] [blame] | 1314 | |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 1315 | if (!user_buffers_[index].get() && offset < kMaxBlockSize) { |
| 1316 | // We are about to create a buffer for the first 16KB, make sure that we |
| 1317 | // preserve existing data. |
| 1318 | if (!CopyToLocalBuffer(index)) |
| 1319 | return false; |
| 1320 | } |
| 1321 | } |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 1322 | |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 1323 | if (!user_buffers_[index].get()) |
Peter Boström | 8a754069 | 2021-04-05 20:48:20 | [diff] [blame] | 1324 | user_buffers_[index] = std::make_unique<UserBuffer>(backend_.get()); |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 1325 | |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 1326 | return PrepareBuffer(index, offset, buf_len); |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 1327 | } |
| 1328 | |
// We get to this function with some data already stored. If there is a
// truncation that results on data stored internally, we'll explicitly
// handle the case here.
//
// Returns false if flushing or re-preparing the buffer fails; the write
// should then be aborted.
bool EntryImpl::HandleTruncation(int index, int offset, int buf_len) {
  Addr address(entry_.Data()->data_addr[index]);

  int current_size = entry_.Data()->data_size[index];
  int new_size = offset + buf_len;

  // This is only called when actually truncating the file, not simply when
  // truncate = true is passed to WriteData(), which could be growing the file.
  DCHECK_LT(new_size, current_size);

  if (new_size == 0) {
    // This is by far the most common scenario.
    // Release all storage and reset the bookkeeping for this stream.
    backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
    unreported_size_[index] = 0;
    entry_.Store();
    DeleteData(address, index);

    user_buffers_[index].reset();
    return true;
  }

  // We never postpone truncating a file, if there is one, but we may postpone
  // telling the backend about the size reduction.
  if (user_buffers_[index].get()) {
    DCHECK_GE(current_size, user_buffers_[index]->Start());
    if (!address.is_initialized()) {
      // There is no overlap between the buffer and disk.
      if (new_size > user_buffers_[index]->Start()) {
        // Truncate our buffer.
        DCHECK_LT(new_size, user_buffers_[index]->End());
        user_buffers_[index]->Truncate(new_size);

        if (offset < user_buffers_[index]->Start()) {
          // Request to write before the current buffer's start, so flush it to
          // disk and re-init.
          UpdateSize(index, current_size, new_size);
          if (!Flush(index, 0))
            return false;
          return PrepareBuffer(index, offset, buf_len);
        } else {
          // Can just stick to using the memory buffer.
          return true;
        }
      }

      // Truncated to before the current buffer, so can just discard it.
      user_buffers_[index]->Reset();
      return PrepareBuffer(index, offset, buf_len);
    }

    // There is some overlap or we need to extend the file before the
    // truncation.
    if (offset > user_buffers_[index]->Start())
      user_buffers_[index]->Truncate(new_size);
    UpdateSize(index, current_size, new_size);
    if (!Flush(index, 0))
      return false;
    user_buffers_[index].reset();
  }

  // We have data somewhere, and it is not in a buffer.
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  if (new_size > kMaxBlockSize)
    return true;  // Let the operation go directly to disk.

  // Small result: pull the external file's remains into a local buffer.
  return ImportSeparateFile(index, offset + buf_len);
}
| 1403 | |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 1404 | bool EntryImpl::CopyToLocalBuffer(int index) { |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 1405 | Addr address(entry_.Data()->data_addr[index]); |
| 1406 | DCHECK(!user_buffers_[index].get()); |
| 1407 | DCHECK(address.is_initialized()); |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 1408 | |
| 1409 | int len = std::min(entry_.Data()->data_size[index], kMaxBlockSize); |
Peter Boström | 8a754069 | 2021-04-05 20:48:20 | [diff] [blame] | 1410 | user_buffers_[index] = std::make_unique<UserBuffer>(backend_.get()); |
Raul Tambre | 94493c65 | 2019-03-11 17:18:35 | [diff] [blame] | 1411 | user_buffers_[index]->Write(len, nullptr, 0); |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 1412 | |
| 1413 | File* file = GetBackingFile(address, index); |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 1414 | int offset = 0; |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 1415 | |
| 1416 | if (address.is_block_file()) |
| 1417 | offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; |
| 1418 | |
Maks Orlovich | a00fe58 | 2025-06-12 22:17:20 | [diff] [blame] | 1419 | if (!file || !file->Read(user_buffers_[index]->as_span().first( |
| 1420 | base::checked_cast<size_t>(len)), |
| 1421 | offset, nullptr, nullptr)) { |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 1422 | user_buffers_[index].reset(); |
| 1423 | return false; |
| 1424 | } |
| 1425 | return true; |
| 1426 | } |
| 1427 | |
// Copies stream |index| into a memory buffer and then releases its on-disk
// storage, so subsequent writes happen in memory until the next flush.
bool EntryImpl::MoveToLocalBuffer(int index) {
  if (!CopyToLocalBuffer(index))
    return false;

  // Clear and persist the address before freeing the storage, so a crash in
  // between cannot leave the entry pointing at recycled blocks (see the
  // comment on DeleteData).
  Addr address(entry_.Data()->data_addr[index]);
  entry_.Data()->data_addr[index] = 0;
  entry_.Store();
  DeleteData(address, index);

  // If we lose this entry we'll see it as zero sized.
  int len = entry_.Data()->data_size[index];
  backend_->ModifyStorageSize(len - unreported_size_[index], 0);
  unreported_size_[index] = len;
  return true;
}
| 1443 | |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 1444 | bool EntryImpl::ImportSeparateFile(int index, int new_size) { |
| 1445 | if (entry_.Data()->data_size[index] > new_size) |
| 1446 | UpdateSize(index, entry_.Data()->data_size[index], new_size); |
| 1447 | |
| 1448 | return MoveToLocalBuffer(index); |
| 1449 | } |
| 1450 | |
// Makes sure the memory buffer for stream |index| can accept a |buf_len|-byte
// write at |offset|, flushing or discarding the buffer when it cannot. A true
// return with no buffer remaining means the write should go straight to disk.
bool EntryImpl::PrepareBuffer(int index, int offset, int buf_len) {
  DCHECK(user_buffers_[index].get());
  if ((user_buffers_[index]->End() && offset > user_buffers_[index]->End()) ||
      offset > entry_.Data()->data_size[index]) {
    // We are about to extend the buffer or the file (with zeros), so make sure
    // that we are not overwriting anything.
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized() && address.is_separate_file()) {
      if (!Flush(index, 0))
        return false;
      // There is an actual file already, and we don't want to keep track of
      // its length so we let this operation go straight to disk.
      // The only case when a buffer is allowed to extend the file (as in fill
      // with zeros before the start) is when there is no file yet to extend.
      user_buffers_[index].reset();
      return true;
    }
  }

  if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
    // The buffer cannot grow to cover the write; flush it to make room.
    if (!Flush(index, offset + buf_len))
      return false;

    // Lets try again.
    if (offset > user_buffers_[index]->End() ||
        !user_buffers_[index]->PreWrite(offset, buf_len)) {
      // We cannot complete the operation with a buffer.
      DCHECK(!user_buffers_[index]->Size());
      DCHECK(!user_buffers_[index]->Start());
      user_buffers_[index].reset();
    }
  }
  return true;
}
| 1485 | |
[email protected] | 9cb851f | 2010-09-29 17:32:40 | [diff] [blame] | 1486 | bool EntryImpl::Flush(int index, int min_len) { |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 1487 | Addr address(entry_.Data()->data_addr[index]); |
| 1488 | DCHECK(user_buffers_[index].get()); |
[email protected] | 9cb851f | 2010-09-29 17:32:40 | [diff] [blame] | 1489 | DCHECK(!address.is_initialized() || address.is_separate_file()); |
[email protected] | 42d5be69 | 2010-11-04 17:34:38 | [diff] [blame] | 1490 | DVLOG(3) << "Flush"; |
[email protected] | 9cb851f | 2010-09-29 17:32:40 | [diff] [blame] | 1491 | |
| 1492 | int size = std::max(entry_.Data()->data_size[index], min_len); |
[email protected] | 1bf71ed | 2010-10-01 16:31:10 | [diff] [blame] | 1493 | if (size && !address.is_initialized() && !CreateDataBlock(index, size)) |
[email protected] | 9cb851f | 2010-09-29 17:32:40 | [diff] [blame] | 1494 | return false; |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 1495 | |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 1496 | if (!entry_.Data()->data_size[index]) { |
| 1497 | DCHECK(!user_buffers_[index]->Size()); |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 1498 | return true; |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 1499 | } |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 1500 | |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 1501 | address.set_value(entry_.Data()->data_addr[index]); |
| 1502 | |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 1503 | int len = user_buffers_[index]->Size(); |
| 1504 | int offset = user_buffers_[index]->Start(); |
[email protected] | 9cb851f | 2010-09-29 17:32:40 | [diff] [blame] | 1505 | if (!len && !offset) |
| 1506 | return true; |
| 1507 | |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 1508 | if (address.is_block_file()) { |
| 1509 | DCHECK_EQ(len, entry_.Data()->data_size[index]); |
| 1510 | DCHECK(!offset); |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 1511 | offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 1512 | } |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 1513 | |
[email protected] | 9cb851f | 2010-09-29 17:32:40 | [diff] [blame] | 1514 | File* file = GetBackingFile(address, index); |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 1515 | if (!file) |
| 1516 | return false; |
| 1517 | |
Maks Orlovich | a00fe58 | 2025-06-12 22:17:20 | [diff] [blame] | 1518 | if (!file->Write(user_buffers_[index]->as_span().first( |
| 1519 | base::checked_cast<size_t>(len)), |
| 1520 | offset, nullptr, nullptr)) { |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 1521 | return false; |
Maks Orlovich | a00fe58 | 2025-06-12 22:17:20 | [diff] [blame] | 1522 | } |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 1523 | user_buffers_[index]->Reset(); |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 1524 | |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 1525 | return true; |
| 1526 | } |
| 1527 | |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 1528 | void EntryImpl::UpdateSize(int index, int old_size, int new_size) { |
| 1529 | if (entry_.Data()->data_size[index] == new_size) |
| 1530 | return; |
| 1531 | |
| 1532 | unreported_size_[index] += new_size - old_size; |
| 1533 | entry_.Data()->data_size[index] = new_size; |
| 1534 | entry_.set_modified(); |
| 1535 | } |
| 1536 | |
[email protected] | 84d4cee | 2009-06-18 23:46:58 | [diff] [blame] | 1537 | int EntryImpl::InitSparseData() { |
| 1538 | if (sparse_.get()) |
| 1539 | return net::OK; |
| 1540 | |
[email protected] | fb2622f | 2010-07-13 18:00:56 | [diff] [blame] | 1541 | // Use a local variable so that sparse_ never goes from 'valid' to NULL. |
Tsuyoshi Horo | f8861cb | 2022-07-05 23:50:20 | [diff] [blame] | 1542 | auto sparse = std::make_unique<SparseControl>(this); |
[email protected] | fb2622f | 2010-07-13 18:00:56 | [diff] [blame] | 1543 | int result = sparse->Init(); |
| 1544 | if (net::OK == result) |
| 1545 | sparse_.swap(sparse); |
| 1546 | |
[email protected] | 84d4cee | 2009-06-18 23:46:58 | [diff] [blame] | 1547 | return result; |
| 1548 | } |
| 1549 | |
avi | d0181f3 | 2015-12-10 19:41:47 | [diff] [blame] | 1550 | void EntryImpl::SetEntryFlags(uint32_t flags) { |
[email protected] | 7d4e3a8 | 2009-07-09 22:01:29 | [diff] [blame] | 1551 | entry_.Data()->flags |= flags; |
| 1552 | entry_.set_modified(); |
| 1553 | } |
| 1554 | |
avi | d0181f3 | 2015-12-10 19:41:47 | [diff] [blame] | 1555 | uint32_t EntryImpl::GetEntryFlags() { |
[email protected] | 7d4e3a8 | 2009-07-09 22:01:29 | [diff] [blame] | 1556 | return entry_.Data()->flags; |
| 1557 | } |
| 1558 | |
Tsuyoshi Horo | 3e843cb | 2022-07-21 03:09:54 | [diff] [blame] | 1559 | void EntryImpl::GetData(int index, |
Maks Orlovich | a00fe58 | 2025-06-12 22:17:20 | [diff] [blame] | 1560 | base::HeapArray<uint8_t>* buffer, |
Tsuyoshi Horo | 3e843cb | 2022-07-21 03:09:54 | [diff] [blame] | 1561 | Addr* address) { |
[email protected] | 11fbca0b | 2013-06-02 23:37:21 | [diff] [blame] | 1562 | DCHECK(backend_.get()); |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 1563 | if (user_buffers_[index].get() && user_buffers_[index]->Size() && |
| 1564 | !user_buffers_[index]->Start()) { |
[email protected] | e7e9932 | 2010-05-04 23:30:17 | [diff] [blame] | 1565 | // The data is already in memory, just copy it and we're done. |
[email protected] | 7d4e3a8 | 2009-07-09 22:01:29 | [diff] [blame] | 1566 | int data_len = entry_.Data()->data_size[index]; |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 1567 | if (data_len <= user_buffers_[index]->Size()) { |
| 1568 | DCHECK(!user_buffers_[index]->Start()); |
Maks Orlovich | a00fe58 | 2025-06-12 22:17:20 | [diff] [blame] | 1569 | *buffer = base::HeapArray<uint8_t>::Uninit(data_len); |
| 1570 | buffer->as_span().copy_from_nonoverlapping( |
| 1571 | user_buffers_[index]->as_span().first(buffer->size())); |
[email protected] | e1fcf14 | 2010-08-23 18:47:25 | [diff] [blame] | 1572 | return; |
| 1573 | } |
[email protected] | 7d4e3a8 | 2009-07-09 22:01:29 | [diff] [blame] | 1574 | } |
| 1575 | |
| 1576 | // Bad news: we'd have to read the info from disk so instead we'll just tell |
| 1577 | // the caller where to read from. |
Piotr Bialecki | 62eb546 | 2024-12-10 21:00:30 | [diff] [blame] | 1578 | *buffer = {}; |
[email protected] | 7d4e3a8 | 2009-07-09 22:01:29 | [diff] [blame] | 1579 | address->set_value(entry_.Data()->data_addr[index]); |
| 1580 | if (address->is_initialized()) { |
| 1581 | // Prevent us from deleting the block from the backing store. |
| 1582 | backend_->ModifyStorageSize(entry_.Data()->data_size[index] - |
| 1583 | unreported_size_[index], 0); |
| 1584 | entry_.Data()->data_addr[index] = 0; |
| 1585 | entry_.Data()->data_size[index] = 0; |
| 1586 | } |
| 1587 | } |
| 1588 | |
Maks Orlovich | 98c00d32 | 2025-05-30 14:44:44 | [diff] [blame] | 1589 | base::span<char> EntryImpl::InternalKeySpan() const { |
| 1590 | CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); |
| 1591 | Addr key_addr(entry->Data()->long_key); |
| 1592 | CHECK(!key_addr.is_initialized()); |
| 1593 | |
| 1594 | int num_blocks = entry_.address().num_blocks(); |
| 1595 | size_t max_key_size = sizeof(EntryStore) - offsetof(EntryStore, key); |
| 1596 | if (num_blocks > 1) { |
| 1597 | max_key_size += sizeof(EntryStore) * (num_blocks - 1); |
| 1598 | } |
| 1599 | |
| 1600 | // Safety: this depends on BackendImpl::CreateEntryImpl allocating the right |
| 1601 | // amount of space using EntryImpl::NumBlocksForEntry, EntryImpl::SanityCheck |
| 1602 | // checking the consistency of fields when opening the entry, and `entry_` |
| 1603 | // mechanics making sure that entry_.address().num_blocks() * |
| 1604 | // sizeof(EntryStore) bytes are mapped. |
| 1605 | return UNSAFE_BUFFERS(base::span(entry->Data()->key, max_key_size)); |
| 1606 | } |
| 1607 | |
initial.commit | 586acc5fe | 2008-07-26 22:42:52 | [diff] [blame] | 1608 | } // namespace disk_cache |