  * License.
  *
  */
+#include <linux/poll.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/workqueue.h>
 #include "tpm.h"
 #include "tpm-dev.h"

+static struct workqueue_struct *tpm_dev_wq;
+static DEFINE_MUTEX(tpm_dev_wq_lock);
+
+static void tpm_async_work(struct work_struct *work)
+{
+        struct file_priv *priv =
+                        container_of(work, struct file_priv, async_work);
+        ssize_t ret;
+
+        mutex_lock(&priv->buffer_mutex);
+        priv->command_enqueued = false;
+        ret = tpm_transmit(priv->chip, priv->space, priv->data_buffer,
+                           sizeof(priv->data_buffer), 0);
+
+        tpm_put_ops(priv->chip);
+        if (ret > 0) {
+                priv->data_pending = ret;
+                mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
+        }
+        mutex_unlock(&priv->buffer_mutex);
+        wake_up_interruptible(&priv->async_wait);
+}
+
 static void user_reader_timeout(struct timer_list *t)
 {
         struct file_priv *priv = from_timer(priv, t, user_read_timer);

         pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
                 task_tgid_nr(current));

-        schedule_work(&priv->work);
+        schedule_work(&priv->timeout_work);
 }

-static void timeout_work(struct work_struct *work)
+static void tpm_timeout_work(struct work_struct *work)
 {
-        struct file_priv *priv = container_of(work, struct file_priv, work);
+        struct file_priv *priv = container_of(work, struct file_priv,
+                                              timeout_work);

         mutex_lock(&priv->buffer_mutex);
         priv->data_pending = 0;
         memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
         mutex_unlock(&priv->buffer_mutex);
+        wake_up_interruptible(&priv->async_wait);
 }

 void tpm_common_open(struct file *file, struct tpm_chip *chip,
-                     struct file_priv *priv)
+                     struct file_priv *priv, struct tpm_space *space)
 {
         priv->chip = chip;
+        priv->space = space;
+
         mutex_init(&priv->buffer_mutex);
         timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
-        INIT_WORK(&priv->work, timeout_work);
-
+        INIT_WORK(&priv->timeout_work, tpm_timeout_work);
+        INIT_WORK(&priv->async_work, tpm_async_work);
+        init_waitqueue_head(&priv->async_wait);
         file->private_data = priv;
 }

@@ -61,15 +91,17 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
         int rc;

         del_singleshot_timer_sync(&priv->user_read_timer);
-        flush_work(&priv->work);
+        flush_work(&priv->timeout_work);
         mutex_lock(&priv->buffer_mutex);

         if (priv->data_pending) {
                 ret_size = min_t(ssize_t, size, priv->data_pending);
-                rc = copy_to_user(buf, priv->data_buffer, ret_size);
-                memset(priv->data_buffer, 0, priv->data_pending);
-                if (rc)
-                        ret_size = -EFAULT;
+                if (ret_size > 0) {
+                        rc = copy_to_user(buf, priv->data_buffer, ret_size);
+                        memset(priv->data_buffer, 0, priv->data_pending);
+                        if (rc)
+                                ret_size = -EFAULT;
+                }

                 priv->data_pending = 0;
         }
@@ -79,13 +111,12 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
 }

 ssize_t tpm_common_write(struct file *file, const char __user *buf,
-                         size_t size, loff_t *off, struct tpm_space *space)
+                         size_t size, loff_t *off)
 {
         struct file_priv *priv = file->private_data;
-        size_t in_size = size;
-        ssize_t out_size;
+        int ret = 0;

-        if (in_size > TPM_BUFSIZE)
+        if (size > TPM_BUFSIZE)
                 return -E2BIG;

         mutex_lock(&priv->buffer_mutex);
@@ -94,56 +125,96 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
          * tpm_read or a user_read_timer timeout. This also prevents split
          * buffered writes from blocking here.
          */
-        if (priv->data_pending != 0) {
-                mutex_unlock(&priv->buffer_mutex);
-                return -EBUSY;
+        if (priv->data_pending != 0 || priv->command_enqueued) {
+                ret = -EBUSY;
+                goto out;
         }

-        if (copy_from_user
-            (priv->data_buffer, (void __user *) buf, in_size)) {
-                mutex_unlock(&priv->buffer_mutex);
-                return -EFAULT;
+        if (copy_from_user(priv->data_buffer, buf, size)) {
+                ret = -EFAULT;
+                goto out;
         }

-        if (in_size < 6 ||
-            in_size < be32_to_cpu(*((__be32 *) (priv->data_buffer + 2)))) {
-                mutex_unlock(&priv->buffer_mutex);
-                return -EINVAL;
+        if (size < 6 ||
+            size < be32_to_cpu(*((__be32 *)(priv->data_buffer + 2)))) {
+                ret = -EINVAL;
+                goto out;
         }

         /* atomic tpm command send and result receive. We only hold the ops
          * lock during this period so that the tpm can be unregistered even if
          * the char dev is held open.
          */
         if (tpm_try_get_ops(priv->chip)) {
-                mutex_unlock(&priv->buffer_mutex);
-                return -EPIPE;
+                ret = -EPIPE;
+                goto out;
         }
-        out_size = tpm_transmit(priv->chip, space, priv->data_buffer,
-                                sizeof(priv->data_buffer), 0);

-        tpm_put_ops(priv->chip);
-        if (out_size < 0) {
+        /*
+         * If in nonblocking mode schedule an async job to send
+         * the command return the size.
+         * In case of error the err code will be returned in
+         * the subsequent read call.
+         */
+        if (file->f_flags & O_NONBLOCK) {
+                priv->command_enqueued = true;
+                queue_work(tpm_dev_wq, &priv->async_work);
                 mutex_unlock(&priv->buffer_mutex);
-                return out_size;
+                return size;
         }

-        priv->data_pending = out_size;
+        ret = tpm_transmit(priv->chip, priv->space, priv->data_buffer,
+                           sizeof(priv->data_buffer), 0);
+        tpm_put_ops(priv->chip);
+
+        if (ret > 0) {
+                priv->data_pending = ret;
+                mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
+                ret = size;
+        }
+out:
         mutex_unlock(&priv->buffer_mutex);
+        return ret;
+}
+
+__poll_t tpm_common_poll(struct file *file, poll_table *wait)
+{
+        struct file_priv *priv = file->private_data;
+        __poll_t mask = 0;
+
+        poll_wait(file, &priv->async_wait, wait);

-        /* Set a timeout by which the reader must come claim the result */
-        mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
+        if (priv->data_pending)
+                mask = EPOLLIN | EPOLLRDNORM;
+        else
+                mask = EPOLLOUT | EPOLLWRNORM;

-        return in_size;
+        return mask;
 }

 /*
  * Called on file close
  */
 void tpm_common_release(struct file *file, struct file_priv *priv)
 {
+        flush_work(&priv->async_work);
         del_singleshot_timer_sync(&priv->user_read_timer);
-        flush_work(&priv->work);
+        flush_work(&priv->timeout_work);
         file->private_data = NULL;
         priv->data_pending = 0;
 }
+
+int __init tpm_dev_common_init(void)
+{
+        tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0);
+
+        return !tpm_dev_wq ? -ENOMEM : 0;
+}
+
+void __exit tpm_dev_common_exit(void)
+{
+        if (tpm_dev_wq) {
+                destroy_workqueue(tpm_dev_wq);
+                tpm_dev_wq = NULL;
+        }
+}
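
Not part of the patch itself: below is a minimal userspace sketch of how the new nonblocking path could be exercised. The device node (/dev/tpm0, /dev/tpmrm0 would work the same way) and the hard-coded TPM2_GetRandom command bytes are assumptions chosen purely for illustration. With O_NONBLOCK set, write() queues the command on tpm_dev_wq and returns the command size immediately; poll() then reports EPOLLIN once tpm_async_work() has stored the response in data_pending, and read() fetches it.

/*
 * Userspace sketch (illustration only, not part of the patch):
 * send a TPM2_GetRandom command through the nonblocking char dev
 * interface and wait for the response with poll(2).
 */
#include <fcntl.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* TPM2_GetRandom, TPM_ST_NO_SESSIONS, 8 bytes requested */
        const uint8_t cmd[] = {
                0x80, 0x01,             /* tag: TPM_ST_NO_SESSIONS */
                0x00, 0x00, 0x00, 0x0c, /* commandSize = 12 */
                0x00, 0x00, 0x01, 0x7b, /* commandCode: TPM_CC_GetRandom */
                0x00, 0x08              /* bytesRequested = 8 */
        };
        uint8_t resp[4096];
        struct pollfd pfd;
        ssize_t n;
        int fd;

        /* Device node is an assumption for this example. */
        fd = open("/dev/tpm0", O_RDWR | O_NONBLOCK);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* With O_NONBLOCK the write queues the command on tpm_dev_wq
         * and returns the command size without waiting for the TPM. */
        if (write(fd, cmd, sizeof(cmd)) != (ssize_t)sizeof(cmd)) {
                perror("write");
                close(fd);
                return 1;
        }

        pfd.fd = fd;
        pfd.events = POLLIN;

        /* Blocks until tpm_async_work() stores the response and wakes
         * async_wait, at which point poll reports EPOLLIN. */
        if (poll(&pfd, 1, -1) != 1 || !(pfd.revents & POLLIN)) {
                fprintf(stderr, "poll failed or no data\n");
                close(fd);
                return 1;
        }

        n = read(fd, resp, sizeof(resp));
        if (n < 0)
                perror("read");
        else
                printf("received %zd response bytes\n", n);

        close(fd);
        return 0;
}

Two design points from the diff are worth noting: the dedicated tpm_dev_wq workqueue is allocated with WQ_MEM_RECLAIM, so async command transmission does not depend on the system workqueues, and the flush_work(&priv->async_work) added to tpm_common_release() guarantees that a still-queued command cannot touch the file_priv after the descriptor is closed.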