  * License.
  *
  */
+#include <linux/poll.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/workqueue.h>
 #include "tpm.h"
 #include "tpm-dev.h"
 
+static struct workqueue_struct *tpm_dev_wq;
+static DEFINE_MUTEX(tpm_dev_wq_lock);
+
+static void tpm_async_work(struct work_struct *work)
+{
+	struct file_priv *priv =
+			container_of(work, struct file_priv, async_work);
+	ssize_t ret;
+
+	mutex_lock(&priv->buffer_mutex);
+	priv->command_enqueued = false;
+	ret = tpm_transmit(priv->chip, priv->space, priv->data_buffer,
+			   sizeof(priv->data_buffer), 0);
+
+	tpm_put_ops(priv->chip);
+	if (ret > 0) {
+		priv->data_pending = ret;
+		mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
+	}
+	mutex_unlock(&priv->buffer_mutex);
+	wake_up_interruptible(&priv->async_wait);
+}
+
 static void user_reader_timeout(struct timer_list *t)
 {
 	struct file_priv *priv = from_timer(priv, t, user_read_timer);
 
 	pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
 		task_tgid_nr(current));
 
-	schedule_work(&priv->work);
+	schedule_work(&priv->timeout_work);
 }
 
-static void timeout_work(struct work_struct *work)
+static void tpm_timeout_work(struct work_struct *work)
 {
-	struct file_priv *priv = container_of(work, struct file_priv, work);
+	struct file_priv *priv = container_of(work, struct file_priv,
+					      timeout_work);
 
 	mutex_lock(&priv->buffer_mutex);
 	priv->data_pending = 0;
 	memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
 	mutex_unlock(&priv->buffer_mutex);
+	wake_up_interruptible(&priv->async_wait);
 }
 
 void tpm_common_open(struct file *file, struct tpm_chip *chip,
@@ -50,8 +77,9 @@ void tpm_common_open(struct file *file, struct tpm_chip *chip,
 
 	mutex_init(&priv->buffer_mutex);
 	timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
-	INIT_WORK(&priv->work, timeout_work);
-
+	INIT_WORK(&priv->timeout_work, tpm_timeout_work);
+	INIT_WORK(&priv->async_work, tpm_async_work);
+	init_waitqueue_head(&priv->async_wait);
 	file->private_data = priv;
 }
 
@@ -63,15 +91,17 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
 	int rc;
 
 	del_singleshot_timer_sync(&priv->user_read_timer);
-	flush_work(&priv->work);
+	flush_work(&priv->timeout_work);
 	mutex_lock(&priv->buffer_mutex);
 
 	if (priv->data_pending) {
 		ret_size = min_t(ssize_t, size, priv->data_pending);
-		rc = copy_to_user(buf, priv->data_buffer, ret_size);
-		memset(priv->data_buffer, 0, priv->data_pending);
-		if (rc)
-			ret_size = -EFAULT;
+		if (ret_size > 0) {
+			rc = copy_to_user(buf, priv->data_buffer, ret_size);
+			memset(priv->data_buffer, 0, priv->data_pending);
+			if (rc)
+				ret_size = -EFAULT;
+		}
 
 		priv->data_pending = 0;
 	}
@@ -84,10 +114,9 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
 			 size_t size, loff_t *off)
 {
 	struct file_priv *priv = file->private_data;
-	size_t in_size = size;
-	ssize_t out_size;
+	int ret = 0;
 
-	if (in_size > TPM_BUFSIZE)
+	if (size > TPM_BUFSIZE)
 		return -E2BIG;
 
 	mutex_lock(&priv->buffer_mutex);
@@ -96,56 +125,96 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
 	 * tpm_read or a user_read_timer timeout. This also prevents split
 	 * buffered writes from blocking here.
 	 */
-	if (priv->data_pending != 0) {
-		mutex_unlock(&priv->buffer_mutex);
-		return -EBUSY;
+	if (priv->data_pending != 0 || priv->command_enqueued) {
+		ret = -EBUSY;
+		goto out;
 	}
 
-	if (copy_from_user
-	    (priv->data_buffer, (void __user *) buf, in_size)) {
-		mutex_unlock(&priv->buffer_mutex);
-		return -EFAULT;
+	if (copy_from_user(priv->data_buffer, buf, size)) {
+		ret = -EFAULT;
+		goto out;
 	}
 
-	if (in_size < 6 ||
-	    in_size < be32_to_cpu(*((__be32 *) (priv->data_buffer + 2)))) {
-		mutex_unlock(&priv->buffer_mutex);
-		return -EINVAL;
+	if (size < 6 ||
+	    size < be32_to_cpu(*((__be32 *)(priv->data_buffer + 2)))) {
		ret = -EINVAL;
+		goto out;
 	}
 
 	/* atomic tpm command send and result receive. We only hold the ops
 	 * lock during this period so that the tpm can be unregistered even if
 	 * the char dev is held open.
 	 */
 	if (tpm_try_get_ops(priv->chip)) {
-		mutex_unlock(&priv->buffer_mutex);
-		return -EPIPE;
+		ret = -EPIPE;
+		goto out;
 	}
-	out_size = tpm_transmit(priv->chip, priv->space, priv->data_buffer,
-				sizeof(priv->data_buffer), 0);
 
-	tpm_put_ops(priv->chip);
-	if (out_size < 0) {
+	/*
+	 * If in nonblocking mode schedule an async job to send
+	 * the command and return the size.
+	 * In case of error the err code will be returned in
+	 * the subsequent read call.
+	 */
+	if (file->f_flags & O_NONBLOCK) {
+		priv->command_enqueued = true;
+		queue_work(tpm_dev_wq, &priv->async_work);
 		mutex_unlock(&priv->buffer_mutex);
-		return out_size;
+		return size;
 	}
 
-	priv->data_pending = out_size;
+	ret = tpm_transmit(priv->chip, priv->space, priv->data_buffer,
+			   sizeof(priv->data_buffer), 0);
+	tpm_put_ops(priv->chip);
+
+	if (ret > 0) {
+		priv->data_pending = ret;
+		mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
+		ret = size;
+	}
+out:
 	mutex_unlock(&priv->buffer_mutex);
+	return ret;
+}
+
+__poll_t tpm_common_poll(struct file *file, poll_table *wait)
+{
+	struct file_priv *priv = file->private_data;
+	__poll_t mask = 0;
+
+	poll_wait(file, &priv->async_wait, wait);
 
-	/* Set a timeout by which the reader must come claim the result */
-	mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
+	if (priv->data_pending)
+		mask = EPOLLIN | EPOLLRDNORM;
+	else
+		mask = EPOLLOUT | EPOLLWRNORM;
 
-	return in_size;
+	return mask;
 }
 
 /*
  * Called on file close
  */
 void tpm_common_release(struct file *file, struct file_priv *priv)
 {
+	flush_work(&priv->async_work);
 	del_singleshot_timer_sync(&priv->user_read_timer);
-	flush_work(&priv->work);
+	flush_work(&priv->timeout_work);
 	file->private_data = NULL;
 	priv->data_pending = 0;
 }
+
+int __init tpm_dev_common_init(void)
+{
+	tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0);
+
+	return !tpm_dev_wq ? -ENOMEM : 0;
+}
+
+void __exit tpm_dev_common_exit(void)
+{
+	if (tpm_dev_wq) {
+		destroy_workqueue(tpm_dev_wq);
+		tpm_dev_wq = NULL;
+	}
+}
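For reference, here is a minimal userspace sketch of the nonblocking flow this patch enables: open the TPM character device with O_NONBLOCK, write a command (the driver queues it on tpm_dev_wq and write() returns the command size immediately), poll() until tpm_common_poll() reports the response as readable, then read() it. The /dev/tpm0 path and the TPM2_GetRandom command bytes are illustrative assumptions, not part of the patch.

#include <fcntl.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* TPM2_GetRandom(8): TPM_ST_NO_SESSIONS, commandSize=12,
	 * TPM_CC_GetRandom (0x017b), bytesRequested=8 (illustrative command).
	 */
	uint8_t cmd[] = {
		0x80, 0x01,
		0x00, 0x00, 0x00, 0x0c,
		0x00, 0x00, 0x01, 0x7b,
		0x00, 0x08
	};
	uint8_t resp[4096];
	struct pollfd pfd;
	ssize_t n;
	int fd;

	fd = open("/dev/tpm0", O_RDWR | O_NONBLOCK);	/* assumed device node */
	if (fd < 0)
		return 1;

	/* With O_NONBLOCK the write queues the command and returns its size. */
	if (write(fd, cmd, sizeof(cmd)) != (ssize_t)sizeof(cmd))
		return 1;

	/* Wait until the async work posts the response and wakes the queue. */
	pfd.fd = fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, 10000) <= 0)
		return 1;

	n = read(fd, resp, sizeof(resp));
	printf("received %zd response bytes\n", n);
	close(fd);
	return 0;
}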