source3/winbindd/wb_sids2xids.c

/*
   Unix SMB/CIFS implementation.
   async sids2xids
   Copyright (C) Volker Lendecke 2011
   Copyright (C) Michael Adam 2012

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "winbindd.h"
#include "../libcli/security/security.h"
#include "idmap_cache.h"
#include "librpc/gen_ndr/ndr_wbint_c.h"
#include "lsa.h"
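
/*
 * Overview: a request runs in up to three stages. wb_sids2xids_send()
 * answers whatever it can from the idmap cache and collects the
 * remaining SIDs; wb_sids2xids_lookupsids_done() turns the lookupsids
 * result into a wbint_TransID array plus a domain list and hands both
 * to the idmap child; wb_sids2xids_recv() merges cached and freshly
 * mapped answers back into the caller's array in the original order.
 * The struct below carries that state across the stages.
 */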
struct wb_sids2xids_state {
        struct tevent_context *ev;

        struct dom_sid *sids;
        uint32_t num_sids;

        struct id_map *cached;

        struct dom_sid *non_cached;
        uint32_t num_non_cached;

        struct lsa_RefDomainList *domains;
        struct lsa_TransNameArray *names;

        /*
         * Domain array to use for the idmap call. The output from
         * lookupsids cannot be used directly since for migrated
         * objects the returned domain SID can be different from the
         * original one. The new domain SID cannot be combined with
         * the RID from the previous domain.
         *
         * The proper way would be asking for the correct RID in the
         * new domain, but this approach avoids id mappings for
         * invalid SIDs.
         */
        struct lsa_RefDomainList *idmap_doms;

        struct wbint_TransIDArray ids;
};

static bool wb_sids2xids_in_cache(struct dom_sid *sid, struct id_map *map);
static void wb_sids2xids_lookupsids_done(struct tevent_req *subreq);
static void wb_sids2xids_done(struct tevent_req *subreq);
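
/*
 * Start the async translation of num_sids SIDs into unix IDs. SIDs
 * that can be served from the idmap cache are recorded in
 * state->cached; the rest are copied to state->non_cached and run
 * through wb_lookupsids first so their type and owning domain are
 * known before the idmap call.
 *
 * Rough caller sketch (illustrative only, not part of this file;
 * "caller_done" and the enclosing request are assumed names):
 *
 *      subreq = wb_sids2xids_send(state, ev, sids, num_sids);
 *      if (tevent_req_nomem(subreq, req)) {
 *              return;
 *      }
 *      tevent_req_set_callback(subreq, caller_done, req);
 *
 * and in caller_done(), with xids pointing at num_sids struct unixid
 * entries:
 *
 *      status = wb_sids2xids_recv(subreq, xids);
 */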
struct tevent_req *wb_sids2xids_send(TALLOC_CTX *mem_ctx,
                                     struct tevent_context *ev,
                                     const struct dom_sid *sids,
                                     const uint32_t num_sids)
{
        struct tevent_req *req, *subreq;
        struct wb_sids2xids_state *state;
        uint32_t i;

        req = tevent_req_create(mem_ctx, &state,
                                struct wb_sids2xids_state);
        if (req == NULL) {
                return NULL;
        }

        state->ev = ev;

        state->num_sids = num_sids;

        state->sids = talloc_zero_array(state, struct dom_sid, num_sids);
        if (tevent_req_nomem(state->sids, req)) {
                return tevent_req_post(req, ev);
        }

        for (i = 0; i < num_sids; i++) {
                sid_copy(&state->sids[i], &sids[i]);
        }

        state->cached = talloc_zero_array(state, struct id_map, num_sids);
        if (tevent_req_nomem(state->cached, req)) {
                return tevent_req_post(req, ev);
        }

        state->non_cached = talloc_array(state, struct dom_sid, num_sids);
        if (tevent_req_nomem(state->non_cached, req)) {
                return tevent_req_post(req, ev);
        }

        /*
         * Extract those sids that can not be resolved from cache
         * into a separate list to be handed to id mapping, keeping
         * the same index.
         */
        for (i=0; i<state->num_sids; i++) {

                DEBUG(10, ("SID %d: %s\n", (int)i,
                           sid_string_dbg(&state->sids[i])));

                if (wb_sids2xids_in_cache(&state->sids[i], &state->cached[i])) {
                        continue;
                }
                sid_copy(&state->non_cached[state->num_non_cached],
                         &state->sids[i]);
                state->num_non_cached += 1;
        }

        if (state->num_non_cached == 0) {
                tevent_req_done(req);
                return tevent_req_post(req, ev);
        }

        subreq = wb_lookupsids_send(state, ev, state->non_cached,
                                    state->num_non_cached);
        if (tevent_req_nomem(subreq, req)) {
                return tevent_req_post(req, ev);
        }
        tevent_req_set_callback(subreq, wb_sids2xids_lookupsids_done, req);
        return req;
}
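
/*
 * Try to satisfy a single SID from the idmap cache. Returns true and
 * fills in *map on a usable hit. An expired entry is only reused
 * while our own domain is offline; once the domain is reachable
 * again the entry is treated as a miss so that it gets refreshed.
 */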
static bool wb_sids2xids_in_cache(struct dom_sid *sid, struct id_map *map)
{
        struct unixid id;
        bool expired;

        if (!winbindd_use_idmap_cache()) {
                return false;
        }
        if (idmap_cache_find_sid2unixid(sid, &id, &expired)) {
                if (expired && is_domain_online(find_our_domain())) {
                        return false;
                }
                map->sid = sid;
                map->xid = id;
                map->status = ID_MAPPED;
                return true;
        }
        return false;
}

static enum id_type lsa_SidType_to_id_type(const enum lsa_SidType sid_type);
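
/*
 * lookupsids has returned: for every non-cached SID, split off the
 * RID, translate the LSA SID type into an id_type hint and register
 * the (possibly rewritten) domain SID in state->idmap_doms. The
 * resulting wbint_TransID array is then sent to the idmap child.
 */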
static void wb_sids2xids_lookupsids_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct wb_sids2xids_state *state = tevent_req_data(
                req, struct wb_sids2xids_state);
        struct winbindd_child *child;
        NTSTATUS status;
        int i;

        status = wb_lookupsids_recv(subreq, state, &state->domains,
                                    &state->names);
        TALLOC_FREE(subreq);
        if (tevent_req_nterror(req, status)) {
                return;
        }

        state->ids.num_ids = state->num_non_cached;
        state->ids.ids = talloc_array(state, struct wbint_TransID,
                                      state->num_non_cached);
        if (tevent_req_nomem(state->ids.ids, req)) {
                return;
        }

        state->idmap_doms = talloc_zero(state, struct lsa_RefDomainList);
        if (tevent_req_nomem(state->idmap_doms, req)) {
                return;
        }

        for (i=0; i<state->num_non_cached; i++) {
                struct dom_sid dom_sid;
                struct lsa_DomainInfo *info;
                struct lsa_TranslatedName *n = &state->names->names[i];
                struct wbint_TransID *t = &state->ids.ids[i];

                sid_copy(&dom_sid, &state->non_cached[i]);
                sid_split_rid(&dom_sid, &t->rid);

                info = &state->domains->domains[n->sid_index];
                t->type = lsa_SidType_to_id_type(n->sid_type);
                t->domain_index = init_lsa_ref_domain_list(state,
                                                           state->idmap_doms,
                                                           info->name.string,
                                                           &dom_sid);
                t->xid.id = UINT32_MAX;
                t->xid.type = t->type;
        }

        child = idmap_child();

        subreq = dcerpc_wbint_Sids2UnixIDs_send(
                state, state->ev, child->binding_handle, state->idmap_doms,
                &state->ids);
        if (tevent_req_nomem(subreq, req)) {
                return;
        }
        tevent_req_set_callback(subreq, wb_sids2xids_done, req);
}
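
/*
 * Map an LSA SID type to the idmap id_type: user and computer
 * accounts become UID mappings, the group-like types become GID
 * mappings, everything else is left as ID_TYPE_NOT_SPECIFIED.
 */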
static enum id_type lsa_SidType_to_id_type(const enum lsa_SidType sid_type)
{
        enum id_type type;

        switch(sid_type) {
        case SID_NAME_COMPUTER:
        case SID_NAME_USER:
                type = ID_TYPE_UID;
                break;
        case SID_NAME_DOM_GRP:
        case SID_NAME_ALIAS:
        case SID_NAME_WKN_GRP:
                type = ID_TYPE_GID;
                break;
        default:
                type = ID_TYPE_NOT_SPECIFIED;
                break;
        }

        return type;
}
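
/*
 * The idmap child has answered. Both the transport status and the
 * wbint result are checked; on success the mapped IDs already sit in
 * state->ids and the request is marked done.
 */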
static void wb_sids2xids_done(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct wb_sids2xids_state *state = tevent_req_data(
                req, struct wb_sids2xids_state);
        NTSTATUS status, result;

        status = dcerpc_wbint_Sids2UnixIDs_recv(subreq, state, &result);
        TALLOC_FREE(subreq);
        if (any_nt_status_not_ok(status, result, &status)) {
                tevent_req_nterror(req, status);
                return;
        }
        tevent_req_done(req);
}
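
/*
 * Collect the result. The caller provides an array of num_sids
 * struct unixid entries; cached and freshly looked-up answers are
 * merged back in the original order, and each freshly looked-up
 * mapping is also written to the idmap cache.
 */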
NTSTATUS wb_sids2xids_recv(struct tevent_req *req,
                           struct unixid *xids)
{
        struct wb_sids2xids_state *state = tevent_req_data(
                req, struct wb_sids2xids_state);
        NTSTATUS status;
        uint32_t i, num_non_cached;

        if (tevent_req_is_nterror(req, &status)) {
                DEBUG(5, ("wb_sids_to_xids failed: %s\n", nt_errstr(status)));
                return status;
        }

        num_non_cached = 0;

        for (i=0; i<state->num_sids; i++) {
                struct unixid xid;

                xid.id = UINT32_MAX;

                if (state->cached[i].sid != NULL) {
                        xid = state->cached[i].xid;
                } else {
                        xid = state->ids.ids[num_non_cached].xid;

                        idmap_cache_set_sid2unixid(
                                &state->non_cached[num_non_cached],
                                &xid);

                        num_non_cached += 1;
                }

                xids[i] = xid;
        }

        return NT_STATUS_OK;
}