/*
* Copyright (c) James Peach 2005-2006
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
/* Cache priming module.
 *
 * The purpose of this module is to do RAID stripe width reads to prime the
 * buffer cache so that subsequent sendfile calls can do zero-copy I/O. The
 * idea is to do a single large read at the start of the file to make sure
 * that most or all of the file is pulled into the buffer cache. Subsequent
 * I/Os then have reduced latency.
 *
 * Tunables.
 *
 *      cacheprime:rsize    Amount of readahead in bytes. This should be a
 *                          multiple of the RAID stripe width.
 *      cacheprime:debug    Debug level at which to emit messages.
 */
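/* Example: a share that stacks this module might be configured in smb.conf
 * roughly as follows (illustrative values only, not a recommendation). The
 * rsize value should match the array geometry; for instance, 8 data disks
 * with a 64 KiB chunk size give a stripe width of 8 * 64 KiB = 512 KiB.
 *
 *      [data]
 *          path = /export/data
 *          vfs objects = cacheprime
 *          cacheprime:rsize = 524288
 *          cacheprime:debug = 10
 */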
#define READAHEAD_MIN (128 * 1024) /* min is 128 KiB */
#define READAHEAD_MAX (100 * 1024 * 1024) /* max is 100 MiB */
#define MODULE "cacheprime"
static int module_debug;        /* debug level for this module's messages */
static ssize_t g_readsz = 0;    /* size of the priming read in bytes, 0 if disabled */
static void * g_readbuf = NULL; /* scratch buffer used for the priming read */

/* Prime the kernel buffer cache with data from the specified file. We use
 * per-fsp data to make sure we only ever do this once. If pread is being
 * emulated by seek/read/seek, then this will be quite expensive.
 */
static bool prime_cache(
    struct vfs_handle_struct *  handle,
    files_struct *              fsp,
    int                         fd,
    SMB_OFF_T                   offset,
    size_t                      count)
{
    SMB_OFF_T * last;
    ssize_t nread;

    last = VFS_ADD_FSP_EXTENSION(handle, fsp, SMB_OFF_T);
    if (!last) {
        return False;
    }

    if (*last == -1) {
        /* Readahead disabled. */
        return False;
    }

    if ((*last + g_readsz) > (offset + count)) {
        /* Skip readahead ... we've already been here. */
        return False;
    }

    DEBUG(module_debug,
        ("%s: doing readahead of %lld bytes at %lld for %s\n",
        MODULE, (long long)g_readsz, (long long)*last,
        fsp->fsp_name));

    nread = sys_pread(fd, g_readbuf, g_readsz, *last);
    if (nread < 0) {
        *last = -1;
        return False;
    }

    *last += nread;
    return True;
}

static int cprime_connect(
    struct vfs_handle_struct *  handle,
    const char *                service,
    const char *                user)
{
    module_debug = lp_parm_int(SNUM(handle->conn), MODULE, "debug", 100);

    if (g_readbuf) {
        /* Only allocate g_readbuf once. If the config changes and
         * another client multiplexes onto this smbd, we don't want
         * to risk memory corruption.
         */
        return SMB_VFS_NEXT_CONNECT(handle, service, user);
    }

    g_readsz = conv_str_size(lp_parm_const_string(SNUM(handle->conn),
                                MODULE, "rsize", NULL));

    if (g_readsz < READAHEAD_MIN) {
        DEBUG(module_debug, ("%s: %ld bytes of readahead "
                "requested, using minimum of %u\n",
                MODULE, (long)g_readsz, READAHEAD_MIN));
        g_readsz = READAHEAD_MIN;
    } else if (g_readsz > READAHEAD_MAX) {
        DEBUG(module_debug, ("%s: %ld bytes of readahead "
                "requested, using maximum of %u\n",
                MODULE, (long)g_readsz, READAHEAD_MAX));
        g_readsz = READAHEAD_MAX;
    }

    if ((g_readbuf = SMB_MALLOC(g_readsz)) == NULL) {
        /* Turn off readahead if we can't get a buffer. */
        g_readsz = 0;
    }

    return SMB_VFS_NEXT_CONNECT(handle, service, user);
}

static ssize_t cprime_sendfile(
    struct vfs_handle_struct *  handle,
    int                         tofd,
    files_struct *              fsp,
    int                         fromfd,
    const DATA_BLOB *           header,
    SMB_OFF_T                   offset,
    size_t                      count)
{
    if (g_readbuf && offset == 0) {
        prime_cache(handle, fsp, fromfd, offset, count);
    }

    return SMB_VFS_NEXT_SENDFILE(handle, tofd, fsp, fromfd,
                                 header, offset, count);
}

static ssize_t cprime_read(
    vfs_handle_struct * handle,
    files_struct *      fsp,
    int                 fd,
    void *              data,
    size_t              count)
{
    SMB_OFF_T offset;

    offset = SMB_VFS_LSEEK(fsp, fd, 0, SEEK_CUR);
    if (offset >= 0 && g_readbuf) {
        prime_cache(handle, fsp, fd, offset, count);
        /* Restore the file position in case the priming read moved it. */
        SMB_VFS_LSEEK(fsp, fd, offset, SEEK_SET);
    }

    return SMB_VFS_NEXT_READ(handle, fsp, fd, data, count);
}

static ssize_t cprime_pread(
    vfs_handle_struct * handle,
    files_struct *      fsp,
    int                 fd,
    void *              data,
    size_t              count,
    SMB_OFF_T           offset)
{
    if (g_readbuf) {
        prime_cache(handle, fsp, fd, offset, count);
    }

    return SMB_VFS_NEXT_PREAD(handle, fsp, fd, data, count, offset);
}

static vfs_op_tuple cprime_ops [] =
{
    {SMB_VFS_OP(cprime_sendfile),
        SMB_VFS_OP_SENDFILE, SMB_VFS_LAYER_TRANSPARENT},
    {SMB_VFS_OP(cprime_pread),
        SMB_VFS_OP_PREAD, SMB_VFS_LAYER_TRANSPARENT},
    {SMB_VFS_OP(cprime_read),
        SMB_VFS_OP_READ, SMB_VFS_LAYER_TRANSPARENT},
    {SMB_VFS_OP(cprime_connect),
        SMB_VFS_OP_CONNECT, SMB_VFS_LAYER_TRANSPARENT},
    {SMB_VFS_OP(NULL), SMB_VFS_OP_NOOP, SMB_VFS_LAYER_NOOP}
};

/* -------------------------------------------------------------------------
* Samba module initialisation entry point.
* -------------------------------------------------------------------------
*/
NTSTATUS vfs_cacheprime_init(void);
NTSTATUS vfs_cacheprime_init(void)
{
    return smb_register_vfs(SMB_VFS_INTERFACE_VERSION, MODULE, cprime_ops);
}
/* vim: set sw=4 ts=4 tw=79 et: */